Skip to content

Commit

Permalink
x86/sgx: Turn on per-cgroup EPC reclamation
Browse files Browse the repository at this point in the history
Previous patches have implemented all infrastructure needed for
per-cgroup EPC page tracking and reclaiming. But all reclaimable EPC
pages are still tracked in the global LRU as sgx_epc_page_lru() always
returns reference to the global LRU.

Change sgx_epc_page_lru() to return the LRU of the cgroup in which the
given EPC page is allocated.

Update sgx_can_reclaim_global(), to check emptiness of LRUs of all
cgroups, and update sgx_reclaim_pages_global(), to utilize
sgx_cgroup_reclaim_pages_global(), when EPC cgroup is enabled.

With these changes, the global reclamation and per-cgroup reclamation
both work properly with all pages tracked in per-cgroup LRUs.

Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
Co-developed-by: Haitao Huang <haitao.huang@linux.intel.com>
Signed-off-by: Haitao Huang <haitao.huang@linux.intel.com>
Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Tested-by: Mikko Ylinen <mikko.ylinen@linux.intel.com>
---
V16:
- Separated out the global and direct reclamation to an earlier patch. (Kai)

V14:
- Update global reclamation to use the new sgx_cgroup_reclaim_pages() to
iterate cgroups at lower level if the top cgroups are too busy.

V13:
- Use IS_ENABLED(CONFIG_CGROUP_MISC) in sgx_can_reclaim_global(). (Kai)

V12:
- Remove CONFIG_CGROUP_SGX_EPC, conditional compile SGX Cgroup for
CONFIG_CGROUP_MISC. (Jarkko)

V11:
- Reword the comments for global reclamation for allocation failure
after passing cgroup charging. (Kai)
- Add stub functions to remove ifdefs in c file (Kai)
- Add more detailed comments to clarify each page belongs to one cgroup, or the
root. (Kai)

V10:
- Add comment to clarify each page belongs to one cgroup, or the root by
default. (Kai)
- Merge the changes that expose sgx_cgroup_* functions to this patch.
- Add changes for sgx_reclaim_direct() that was missed previously.

V7:
- Split this out from the big patch, #10 in V6. (Dave, Kai)
  • Loading branch information
kaccardi authored and haitaohuang committed Aug 30, 2024
1 parent ec6d5c0 commit 7d6db43
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 17 deletions.
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/sgx/epc_cgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ static inline u64 sgx_cgroup_max_pages_to_root(struct sgx_cgroup *sgx_cg)
*
* Return: %true if all cgroups under the specified root have empty LRU lists.
*/
static bool sgx_cgroup_lru_empty(struct misc_cg *root)
bool sgx_cgroup_lru_empty(struct misc_cg *root)
{
struct cgroup_subsys_state *css_root;
struct cgroup_subsys_state *pos;
Expand Down
6 changes: 6 additions & 0 deletions arch/x86/kernel/cpu/sgx/epc_cgroup.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,11 @@ static inline int sgx_cgroup_try_charge(struct sgx_cgroup *sgx_cg, enum sgx_recl

static inline void sgx_cgroup_uncharge(struct sgx_cgroup *sgx_cg) { }

/*
 * Stub for !CONFIG_CGROUP_MISC builds: no per-cgroup LRUs exist, so they
 * are trivially empty.
 */
static inline bool sgx_cgroup_lru_empty(struct misc_cg *root)
{
	(void)root;	/* no cgroup state to inspect in this configuration */
	return true;
}

/* Stub for !CONFIG_CGROUP_MISC builds: no SGX cgroup state to set up. */
static inline void __init sgx_cgroup_init(void) { }
static inline int __init sgx_cgroup_wq_init(void)
{
Expand Down Expand Up @@ -89,6 +94,7 @@ static inline void sgx_put_cg(struct sgx_cgroup *sgx_cg)

int sgx_cgroup_try_charge(struct sgx_cgroup *sgx_cg, enum sgx_reclaim reclaim);
void sgx_cgroup_uncharge(struct sgx_cgroup *sgx_cg);
bool sgx_cgroup_lru_empty(struct misc_cg *root);
void sgx_cgroup_reclaim_direct(void);
void sgx_cgroup_reclaim_pages_global(struct mm_struct *charge_mm);
void __init sgx_cgroup_init(void);
Expand Down
45 changes: 29 additions & 16 deletions arch/x86/kernel/cpu/sgx/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,24 +32,41 @@ static DEFINE_XARRAY(sgx_epc_address_space);
*/
static struct sgx_epc_lru_list sgx_global_lru;

/*
 * Return the LRU list tracking the given reclaimable EPC page: the owning
 * cgroup's LRU when the misc cgroup is compiled in, the global LRU otherwise.
 */
static inline struct sgx_epc_lru_list *sgx_epc_page_lru(struct sgx_epc_page *epc_page)
{
#ifdef CONFIG_CGROUP_MISC
	struct sgx_cgroup *sgx_cg = epc_page->sgx_cg;

	/*
	 * @sgx_cg is never NULL during a reclaimable page's life between
	 * sgx_alloc_epc_page() and sgx_free_epc_page():
	 *
	 * sgx_alloc_epc_page() sets it from sgx_get_current_cg(), i.e. the
	 * misc cgroup of the current task, or the root cgroup by default
	 * even when the misc cgroup is disabled on the kernel command line.
	 *
	 * Only sgx_free_epc_page() clears it, and this helper is never
	 * called outside that window.
	 */
	return &sgx_cg->lru;
#else
	return &sgx_global_lru;
#endif
}

/*
 * Check if there is any reclaimable page at global level.
 *
 * Return: true if at least one EPC page can be reclaimed somewhere.
 */
static inline bool sgx_can_reclaim_global(void)
{
	/*
	 * With the misc cgroup compiled in, every EPC page is tracked in the
	 * LRU of the cgroup it was allocated in (or the root cgroup by
	 * default), so emptiness must be checked across all cgroup LRUs
	 * starting from the root.  Otherwise all pages live in the single
	 * global LRU.
	 */
	if (IS_ENABLED(CONFIG_CGROUP_MISC))
		return !sgx_cgroup_lru_empty(misc_cg_root());

	return !list_empty(&sgx_global_lru.reclaimable);
}

static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
Expand Down Expand Up @@ -411,14 +428,10 @@ static bool sgx_should_reclaim_global(unsigned long watermark)

/*
 * Reclaim EPC pages at the global level on behalf of @charge_mm.
 */
static void sgx_reclaim_pages_global(struct mm_struct *charge_mm)
{
	/*
	 * When the misc cgroup is compiled in, EPC pages are tracked in
	 * per-cgroup LRUs, so reclaim through the cgroup hierarchy;
	 * otherwise reclaim directly from the global LRU.
	 */
	if (IS_ENABLED(CONFIG_CGROUP_MISC))
		sgx_cgroup_reclaim_pages_global(charge_mm);
	else
		sgx_reclaim_pages(&sgx_global_lru, charge_mm);
}

/*
Expand Down

0 comments on commit 7d6db43

Please sign in to comment.