Commit 4ed0d3e6 authored by Fenghua Yu, committed by David Woodhouse

Intel IOMMU Pass Through Support


This patch adds the kernel parameter intel_iommu=pt to set up pass-through
mode in the context mapping entries. This disables DMAR in the Linux kernel,
but KVM still runs on VT-d, and interrupt remapping still works.

In this mode, the kernel uses swiotlb for the DMA API functions, but the
other VT-d facilities remain enabled for KVM. KVM always uses multi-level
page-table translation in VT-d. By default, pass-through mode is disabled
in the kernel.

This is useful when people don't want to enable VT-d DMAR in the kernel but
still want to use KVM and interrupt remapping, for reasons such as DMAR
performance concerns or debugging.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Weidong Han <weidong@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 09106974
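
As a rough illustration of the control flow this patch introduces (a minimal
userspace sketch, not kernel code; the variable and option names merely
mirror the patch below):

	#include <stdio.h>
	#include <string.h>

	static int iommu_pass_through;	/* set when the "pt" option is parsed */
	static int hw_supports_pt = 1;	/* stands in for ecap_pass_through()  */

	int main(void)
	{
		const char *opt = "pt";	/* illustrative boot option */

		if (!strncmp(opt, "pt", 2))
			iommu_pass_through = 1;
		if (iommu_pass_through && !hw_supports_pt)
			iommu_pass_through = 0;	/* fall back to DMA remapping */

		puts(iommu_pass_through ?
		     "pass through: swiotlb handles the DMA API, VT-d left to KVM" :
		     "multi-level page-table translation for DMAR");
		return 0;
	}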
@@ -965,6 +965,7 @@ and is between 256 and 4096 characters. It is defined in the file
 		nomerge
 		forcesac
 		soft
+		pt	[x86, IA64]

 	io7=	[HW] IO7 for Marvel based alpha systems
 		See comment before marvel_specify_io7 in
...
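The new "pt" keyword is a sub-option of the existing iommu= parameter
(alongside nomerge, forcesac, and soft above), so pass-through would be
requested from the boot loader with a command line entry like (illustrative,
not part of the patch):

	iommu=pt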
@@ -9,6 +9,7 @@ extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int iommu_pass_through;
 extern void iommu_dma_init(void);
 extern void machvec_init(const char *name);
...
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
 void __init pci_swiotlb_init(void)
 {
-	if (!iommu_detected) {
+	if (!iommu_detected || iommu_pass_through) {
 #ifdef CONFIG_IA64_GENERIC
 		swiotlb = 1;
 		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
...
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
 extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int iommu_pass_through;

 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
...
@@ -160,6 +160,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	return page_address(page);
 }

+extern int iommu_pass_through;
+
 /*
  * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
  * documentation.
@@ -209,6 +211,10 @@ static __init int iommu_setup(char *p)
 #ifdef CONFIG_SWIOTLB
 		if (!strncmp(p, "soft", 4))
 			swiotlb = 1;
+		if (!strncmp(p, "pt", 2)) {
+			iommu_pass_through = 1;
+			return 1;
+		}
 #endif

 	gart_parse_options(p);
...
@@ -71,7 +71,8 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
+	    iommu_pass_through)
 		swiotlb = 1;
 #endif
 	if (swiotlb_force)
...
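Taken together, the two swiotlb hunks mean pass-through forces the
bounce-buffer path on even when an IOMMU was detected. A minimal sketch of
the x86 predicate (plain userspace C; max_pfn and MAX_DMA32_PFN are reduced
to a single boolean here):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for the pci_swiotlb_init() test above. */
	static bool need_swiotlb(bool iommu_detected, bool no_iommu,
				 bool ram_above_4g, bool iommu_pass_through)
	{
		return (!iommu_detected && !no_iommu && ram_above_4g) ||
		       iommu_pass_through;
	}

	int main(void)
	{
		/* VT-d detected but iommu=pt requested: swiotlb still comes up. */
		printf("%d\n", need_swiotlb(true, false, true, true));	/* 1 */
		return 0;
	}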
@@ -515,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	u32 ver;
 	static int iommu_allocated = 0;
 	int agaw = 0;
+	int msagaw = 0;

 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -535,12 +536,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
+			iommu->seq_id);
+		goto error;
+	}
+	msagaw = iommu_calculate_max_sagaw(iommu);
+	if (msagaw < 0) {
+		printk(KERN_ERR
+			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
 		goto error;
 	}
 #endif
 	iommu->agaw = agaw;
+	iommu->msagaw = msagaw;

 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
...
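The msagaw addition reuses the same bit-scan the driver already does for
agaw, just with a 64-bit ceiling. A self-contained sketch of that selection
(assuming the driver's usual width-to-agaw mapping of 30 bits plus 9 per
page-table level; the SAGAW mask below is an arbitrary example, not from
real hardware):

	#include <stdio.h>

	/* Assumed mapping used by the driver: width = 30 + 9 * agaw. */
	static int width_to_agaw(int width)
	{
		return (width - 30) / 9;	/* 30->0, 39->1, 48->2, 57->3 */
	}

	/* Pick the highest supported agaw not exceeding the target width. */
	static int calculate_agaw(unsigned long sagaw, int max_gaw)
	{
		int agaw;

		for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
			if (sagaw & (1UL << agaw))
				return agaw;
		return -1;
	}

	int main(void)
	{
		unsigned long sagaw = 0x4;	/* example: only 48-bit (agaw 2) supported */

		/* the default width (48) and MAX_AGAW_WIDTH (64) both settle on agaw 2 */
		printf("%d %d\n", calculate_agaw(sagaw, 48), calculate_agaw(sagaw, 64));
		return 0;
	}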
@@ -53,6 +53,8 @@
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

+#define MAX_AGAW_WIDTH 64
+
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
@@ -127,8 +129,6 @@ static inline void context_set_fault_enable(struct context_entry *context)
 	context->lo &= (((u64)-1) << 2) | 1;
 }

-#define CONTEXT_TT_MULTI_LEVEL 0
-
 static inline void context_set_translation_type(struct context_entry *context,
 						unsigned long value)
 {
@@ -288,6 +288,7 @@
 int dmar_disabled = 1;
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
+int iommu_pass_through;

 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
@@ -397,17 +398,13 @@ void free_iova_mem(struct iova *iova)

 static inline int width_to_agaw(int width);

-/* calculate agaw for each iommu.
- * "SAGAW" may be different across iommus, use a default agaw, and
- * get a supported less agaw for iommus that don't support the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
+static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
 	unsigned long sagaw;
 	int agaw = -1;

 	sagaw = cap_sagaw(iommu->cap);
-	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	for (agaw = width_to_agaw(max_gaw);
 	     agaw >= 0; agaw--) {
 		if (test_bit(agaw, &sagaw))
 			break;
@@ -416,6 +413,24 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
 	return agaw;
 }

+/*
+ * Calculate max SAGAW for each iommu.
+ */
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
+}
+
+/*
+ * calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus, use a default agaw, and
+ * get a supported less agaw for iommus that don't support the default agaw.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+}
+
 /* in native case, each domain is related to only one iommu */
 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 {
@@ -1321,8 +1336,8 @@ static void domain_exit(struct dmar_domain *domain)
 	free_domain_mem(domain);
 }

-static int domain_context_mapping_one(struct dmar_domain *domain,
-		int segment, u8 bus, u8 devfn)
+static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
+				      u8 bus, u8 devfn, int translation)
 {
 	struct context_entry *context;
 	unsigned long flags;
@@ -1335,7 +1350,10 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
 		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 	BUG_ON(!domain->pgd);
+	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
+	       translation != CONTEXT_TT_MULTI_LEVEL);
+
 	iommu = device_to_iommu(segment, bus, devfn);
 	if (!iommu)
@@ -1395,9 +1413,18 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	}

 	context_set_domain_id(context, id);
-	context_set_address_width(context, iommu->agaw);
-	context_set_address_root(context, virt_to_phys(pgd));
-	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+
+	/*
+	 * In pass through mode, AW must be programmed to indicate the largest
+	 * AGAW value supported by hardware. And ASR is ignored by hardware.
+	 */
+	if (likely(translation == CONTEXT_TT_MULTI_LEVEL)) {
+		context_set_address_width(context, iommu->agaw);
+		context_set_address_root(context, virt_to_phys(pgd));
+	} else
+		context_set_address_width(context, iommu->msagaw);
+
+	context_set_translation_type(context, translation);
 	context_set_fault_enable(context);
 	context_set_present(context);
 	domain_flush_cache(domain, context, sizeof(*context));
@@ -1422,13 +1449,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 }

 static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+		       int translation)
 {
 	int ret;
 	struct pci_dev *tmp, *parent;

 	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
-					 pdev->bus->number, pdev->devfn);
+					 pdev->bus->number, pdev->devfn,
+					 translation);
 	if (ret)
 		return ret;
@@ -1442,7 +1471,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 		ret = domain_context_mapping_one(domain,
 						 pci_domain_nr(parent->bus),
 						 parent->bus->number,
-						 parent->devfn);
+						 parent->devfn, translation);
 		if (ret)
 			return ret;
 		parent = parent->bus->self;
@@ -1450,12 +1479,14 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
 	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
 		return domain_context_mapping_one(domain,
 					pci_domain_nr(tmp->subordinate),
-					tmp->subordinate->number, 0);
+					tmp->subordinate->number, 0,
+					translation);
 	else /* this is a legacy PCI bridge */
 		return domain_context_mapping_one(domain,
 					pci_domain_nr(tmp->bus),
 					tmp->bus->number,
-					tmp->devfn);
+					tmp->devfn,
+					translation);
 }

 static int domain_context_mapped(struct pci_dev *pdev)
@@ -1752,7 +1783,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 		goto error;

 	/* context entry init */
-	ret = domain_context_mapping(domain, pdev);
+	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 	if (!ret)
 		return 0;
 error:
@@ -1853,6 +1884,23 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */

+/* Initialize each context entry as pass through.*/
+static int __init init_context_pass_through(void)
+{
+	struct pci_dev *pdev = NULL;
+	struct dmar_domain *domain;
+	int ret;
+
+	for_each_pci_dev(pdev) {
+		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+		ret = domain_context_mapping(domain, pdev,
+					     CONTEXT_TT_PASS_THROUGH);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 static int __init init_dmars(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -1860,6 +1908,7 @@ static int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
+	int pass_through = 1;

 	/*
 	 * for each drhd
@@ -1913,7 +1962,15 @@ static int __init init_dmars(void)
 			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
 			goto error;
 		}
+		if (!ecap_pass_through(iommu->ecap))
+			pass_through = 0;
 	}
+	if (iommu_pass_through)
+		if (!pass_through) {
+			printk(KERN_INFO
+			       "Pass Through is not supported by hardware.\n");
+			iommu_pass_through = 0;
+		}

 	/*
 	 * Start from the sane iommu hardware state.
@@ -1976,37 +2033,57 @@ static int __init init_dmars(void)
 			"IOMMU: enable interrupt remapping failed\n");
 	}
 #endif
+	/*
+	 * If pass through is set and enabled, context entries of all pci
+	 * devices are initialized by pass through translation type.
+	 */
+	if (iommu_pass_through) {
+		ret = init_context_pass_through();
+		if (ret) {
+			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
+			iommu_pass_through = 0;
+		}
+	}
+
 	/*
-	 * For each rmrr
-	 *   for each dev attached to rmrr
-	 *   do
-	 *     locate drhd for dev, alloc domain for dev
-	 *     allocate free domain
-	 *     allocate page table entries for rmrr
-	 *     if context not allocated for bus
-	 *           allocate and init context
-	 *           set present in root table for this bus
-	 *     init context with domain, translation etc
-	 *   endfor
-	 * endfor
+	 * If pass through is not set or not enabled, setup context entries
+	 * for identity mappings for rmrr, gfx, and isa.
 	 */
-	for_each_rmrr_units(rmrr) {
-		for (i = 0; i < rmrr->devices_cnt; i++) {
-			pdev = rmrr->devices[i];
-			/* some BIOS lists non-exist devices in DMAR table */
-			if (!pdev)
-				continue;
-			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-			if (ret)
-				printk(KERN_ERR
-				"IOMMU: mapping reserved region failed\n");
+	if (!iommu_pass_through) {
+		/*
+		 * For each rmrr
+		 *   for each dev attached to rmrr
+		 *   do
+		 *     locate drhd for dev, alloc domain for dev
+		 *     allocate free domain
+		 *     allocate page table entries for rmrr
+		 *     if context not allocated for bus
+		 *           allocate and init context
+		 *           set present in root table for this bus
+		 *     init context with domain, translation etc
+		 *   endfor
+		 * endfor
+		 */
+		for_each_rmrr_units(rmrr) {
+			for (i = 0; i < rmrr->devices_cnt; i++) {
+				pdev = rmrr->devices[i];
+				/*
+				 * some BIOS lists non-exist devices in DMAR
+				 * table.
+				 */
+				if (!pdev)
+					continue;
+				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+				if (ret)
+					printk(KERN_ERR
			"IOMMU: mapping reserved region failed\n");
+			}
 		}
-	}

-	iommu_prepare_gfx_mapping();
+		iommu_prepare_gfx_mapping();

-	iommu_prepare_isa();
+		iommu_prepare_isa();
+	}

 	/*
 	 * for each drhd
@@ -2117,7 +2194,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)

 	/* make sure context mapping is ok */
 	if (unlikely(!domain_context_mapped(pdev))) {
-		ret = domain_context_mapping(domain, pdev);
+		ret = domain_context_mapping(domain, pdev,
+					     CONTEXT_TT_MULTI_LEVEL);
 		if (ret) {
 			printk(KERN_ERR
 			       "Domain context map for %s failed",
@@ -2786,7 +2864,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || swiotlb || dmar_disabled)
+	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
 		return -ENODEV;

 	iommu_init_mempool();
@@ -2806,7 +2884,15 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;
-	dma_ops = &intel_dma_ops;
+
+	if (!iommu_pass_through) {
+		printk(KERN_INFO
+		       "Multi-level page-table translation for DMAR.\n");
+		dma_ops = &intel_dma_ops;
+	} else
+		printk(KERN_INFO
+		       "DMAR: Pass through translation for DMAR.\n");
+
 	init_iommu_sysfs();
 	register_iommu(&intel_iommu_ops);
@@ -3146,7 +3232,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		return -EFAULT;
 	}

-	ret = domain_context_mapping(dmar_domain, pdev);
+	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 	if (ret)
 		return ret;
...
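For readers without the VT-d spec at hand, the context-entry programming
above boils down to a handful of bit fields: present in bit 0 and the
translation type in bits 3:2 of the low qword, the address-space root (ASR)
in bits 63:12, and the address width (AW) in bits 2:0 of the high qword. In
pass-through the entry still needs a valid AW (hence msagaw) while ASR is
ignored. A hypothetical illustration of that layout, not the driver's own
helpers:

	#include <stdint.h>
	#include <stdio.h>

	struct context_entry { uint64_t lo, hi; };

	enum { CONTEXT_TT_MULTI_LEVEL = 0, CONTEXT_TT_PASS_THROUGH = 2 };

	static void context_program(struct context_entry *c, int tt,
				    uint64_t pgd_phys, int aw)
	{
		c->hi = (uint64_t)(aw & 7);		/* AW: bits 2:0 of the high qword */
		c->lo = ((uint64_t)tt & 3) << 2;	/* TT: bits 3:2 of the low qword  */
		if (tt == CONTEXT_TT_MULTI_LEVEL)
			c->lo |= pgd_phys & ~0xfffULL;	/* ASR; ignored in pass-through   */
		c->lo |= 1;				/* present bit                    */
	}

	int main(void)
	{
		struct context_entry c;

		/* pass-through: AW = msagaw, no page-table root needed */
		context_program(&c, CONTEXT_TT_PASS_THROUGH, 0, 3);
		printf("lo=%#llx hi=%#llx\n",
		       (unsigned long long)c.lo, (unsigned long long)c.hi);
		return 0;
	}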
@@ -13,6 +13,9 @@
 #define DMA_PTE_WRITE (2)
 #define DMA_PTE_SNP (1 << 11)

+#define CONTEXT_TT_MULTI_LEVEL	0
+#define CONTEXT_TT_PASS_THROUGH 2
+
 struct intel_iommu;
 struct dmar_domain;
 struct root_entry;
@@ -21,11 +24,16 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);

 #ifdef CONFIG_DMAR
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
 	return 0;
 }
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+	return 0;
+}
 #endif

 extern int dmar_disabled;
...
@@ -120,6 +120,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 	(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
 #define ecap_coherent(e)	((e) & 0x1)
 #define ecap_qis(e)		((e) & 0x2)
+#define ecap_pass_through(e)	((e >> 6) & 0x1)
 #define ecap_eim_support(e)	((e >> 4) & 0x1)
 #define ecap_ir_support(e)	((e >> 3) & 0x1)
 #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
@@ -302,6 +303,7 @@ struct intel_iommu {
 	spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
 	int		agaw; /* agaw of this iommu */
+	int		msagaw; /* max sagaw of this iommu */
 	unsigned int	irq;
 	unsigned char	name[13];    /* Device Name */
...