mm/hmm: remove HMM_PFN_SPECIAL
This is just an alias for HMM_PFN_ERROR; nothing cares that the error was because of a special page vs. any other error case.

Link: https://lore.kernel.org/r/4-v2-b4e84f444c7d+24f57-hmm_no_flags_jgg@mellanox.com
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent 4e2490843d
commit 5c8f3c4cf1

4 changed files with 1 addition and 11 deletions
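To make the "just an alias" argument concrete, here is a hedged sketch of the only way a mirroring driver could ever observe the distinction; my_entry_is_mirrorable() is a hypothetical helper, not code from this series.

#include <linux/hmm.h>

/*
 * Hedged illustration, not from the patch: a driver had to publish a
 * distinct values[HMM_PFN_SPECIAL], yet every consumer treated it exactly
 * like values[HMM_PFN_ERROR] -- "this entry cannot be mirrored".
 */
static bool my_entry_is_mirrorable(const struct hmm_range *range, uint64_t entry)
{
	if (entry == range->values[HMM_PFN_ERROR])	/* now also covers former "special" PTEs */
		return false;
	if (entry == range->values[HMM_PFN_NONE])
		return false;
	return entry & range->flags[HMM_PFN_VALID];
}

Since the special value was only ever compared against and rejected exactly like the error value, collapsing the two entries changes no observable behavior.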
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -775,7 +775,6 @@ static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
 static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
 	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
 	0, /* HMM_PFN_NONE */
-	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
 };
 
 /**
drivers/gpu/drm/nouveau/nouveau_svm.c

@@ -379,7 +379,6 @@ static const u64
 nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
 	[HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
 	[HMM_PFN_NONE   ] = NVIF_VMM_PFNMAP_V0_NONE,
-	[HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
 };
 
 /* Issue fault replay for GPU to retry accesses that faulted previously. */
include/linux/hmm.h

@@ -44,10 +44,6 @@ enum hmm_pfn_flag_e {
  * Flags:
  * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
  * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
- * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
- *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
- *      be mirrored by a device, because the entry will never have HMM_PFN_VALID
- *      set and the pfn value is undefined.
  *
  * Driver provides values for none entry, error entry, and special entry.
  * Driver can alias (i.e., use same value) error and special, but
@@ -56,12 +52,10 @@ enum hmm_pfn_flag_e {
  * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
  * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous,
  * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
- * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one
  */
 enum hmm_pfn_value_e {
 	HMM_PFN_ERROR,
 	HMM_PFN_NONE,
-	HMM_PFN_SPECIAL,
 	HMM_PFN_VALUE_MAX
 };
 
@@ -110,8 +104,6 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range
 		return NULL;
 	if (entry == range->values[HMM_PFN_ERROR])
 		return NULL;
-	if (entry == range->values[HMM_PFN_SPECIAL])
-		return NULL;
 	if (!(entry & range->flags[HMM_PFN_VALID]))
 		return NULL;
 	return pfn_to_page(entry >> range->pfn_shift);
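With hmm_device_entry_to_page() reduced to the error/none/valid checks above, a driver no longer has anything "special" to test for. A hedged usage sketch follows; my_mirror_range() is a hypothetical helper, the mmu_interval_notifier retry loop and locking that real callers need are omitted, and only the values[]/flags[] interface visible in this diff is relied on.

#include <linux/hmm.h>

/*
 * Hedged sketch, not from the patch: walk the snapshot filled in by
 * hmm_range_fault() and translate each entry.
 */
static int my_mirror_range(struct hmm_range *range)	/* hypothetical helper */
{
	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		struct page *page;

		/* Former HMM_PFN_SPECIAL entries now show up as the error value. */
		if (range->pfns[i] == range->values[HMM_PFN_ERROR])
			return -EFAULT;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (!page)
			continue;	/* pte_none() hole or !HMM_PFN_VALID: nothing to mirror */

		/* ... program the device page table with page_to_pfn(page) ... */
	}
	return 0;
}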
mm/hmm.c

@@ -301,7 +301,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			pte_unmap(ptep);
 			return -EFAULT;
 		}
-		*pfn = range->values[HMM_PFN_SPECIAL];
+		*pfn = range->values[HMM_PFN_ERROR];
 		return 0;
 	}
 