OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

trunk/linux_sd_driver/drivers/pci/intel-iommu.h - rev 62 (author: marcus.erl)

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/msi.h>
#include "iova.h"
#include <linux/io.h>

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */

#define DMAR_VER_REG    0x0     /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG    0x8     /* Hardware supported capabilities */
#define DMAR_ECAP_REG   0x10    /* Extended capabilities supported */
#define DMAR_GCMD_REG   0x18    /* Global command register */
#define DMAR_GSTS_REG   0x1c    /* Global status register */
#define DMAR_RTADDR_REG 0x20    /* Root entry table */
#define DMAR_CCMD_REG   0x28    /* Context command reg */
#define DMAR_FSTS_REG   0x34    /* Fault Status register */
#define DMAR_FECTL_REG  0x38    /* Fault control register */
#define DMAR_FEDATA_REG 0x3c    /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40    /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44   /* Upper address register */
#define DMAR_AFLOG_REG  0x58    /* Advanced Fault control */
#define DMAR_PMEN_REG   0x64    /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68   /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c  /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70   /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78  /* pmrr high limit */

#define OFFSET_STRIDE           (9)
/*
#define dmar_readl(dmar, reg) readl(dmar + reg)
#define dmar_readq(dmar, reg) ({ \
                u32 lo, hi; \
                lo = readl(dmar + reg); \
                hi = readl(dmar + reg + 4); \
                (((u64) hi) << 32) + lo; })
*/
static inline u64 dmar_readq(void __iomem *addr)
{
        u32 lo, hi;
        lo = readl(addr);
        hi = readl(addr + 4);
        return (((u64) hi) << 32) + lo;
}

static inline void dmar_writeq(void __iomem *addr, u64 val)
{
        writel((u32)val, addr);
        writel((u32)(val >> 32), addr + 4);
}

#define DMAR_VER_MAJOR(v)               (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)               ((v) & 0x0f)
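
/*
 * Illustrative sketch: how the fixed-offset registers above are typically
 * pulled into host variables with readl() and the dmar_readq() helper.
 * The function name and the caller-supplied "reg" base are assumptions made
 * for this example only.
 */
static inline void example_read_basic_regs(void __iomem *reg,
                                           u32 *ver, u64 *cap, u64 *ecap)
{
        *ver  = readl(reg + DMAR_VER_REG);       /* 32-bit version register */
        *cap  = dmar_readq(reg + DMAR_CAP_REG);  /* 64-bit capability register */
        *ecap = dmar_readq(reg + DMAR_ECAP_REG); /* 64-bit extended capability */
        /* DMAR_VER_MAJOR(*ver) and DMAR_VER_MINOR(*ver) then split the version */
}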

/*
 * Decoding Capability Register
 */
#define cap_read_drain(c)       (((c) >> 55) & 1)
#define cap_write_drain(c)      (((c) >> 54) & 1)
#define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)   ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)        (((c) >> 39) & 1)

#define cap_super_page_val(c)   (((c) >> 34) & 0xf)
#define cap_super_offset(c)     (((find_first_bit(&cap_super_page_val(c), 4)) \
                                        * OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
        (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)              (((c) >> 22) & 1)
#define cap_isoch(c)            (((c) >> 23) & 1)
#define cap_mgaw(c)             ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)            (((c) >> 8) & 0x1f)
#define cap_caching_mode(c)     (((c) >> 7) & 1)
#define cap_phmr(c)             (((c) >> 6) & 1)
#define cap_plmr(c)             (((c) >> 5) & 1)
#define cap_rwbf(c)             (((c) >> 4) & 1)
#define cap_afl(c)              (((c) >> 3) & 1)
#define cap_ndoms(c)            (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
 * Extended Capability Register
 */

#define ecap_niotlb_iunits(e)   ((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e)    ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) \
        (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e)        ((e) & 0x1)
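
/*
 * Illustrative sketch: decoding a raw capability/extended-capability pair
 * with the macros above.  The struct and function names here are assumed
 * for the example only.
 */
struct example_dmar_caps {
        unsigned long ndoms;    /* number of domain ids supported */
        int mgaw;               /* maximum guest address width */
        int nfr;                /* number of fault recording registers */
        int fro;                /* byte offset of the first fault record */
        int iotlb_off;          /* byte offset of the IOTLB registers */
        int coherent;           /* page-walk coherency */
};

static inline void example_decode_caps(u64 cap, u64 ecap,
                                       struct example_dmar_caps *c)
{
        c->ndoms     = cap_ndoms(cap);
        c->mgaw      = cap_mgaw(cap);
        c->nfr       = cap_num_fault_regs(cap);
        c->fro       = cap_fault_reg_offset(cap);
        c->iotlb_off = ecap_iotlb_offset(ecap);
        c->coherent  = ecap_coherent(ecap);
}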


/* IOTLB_REG */
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)
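
/*
 * Illustrative sketch: composing the 64-bit value for a domain-selective
 * IOTLB flush of domain "did" with read/write drain requested.  The helper
 * name is assumed for this example.
 */
static inline u64 example_iotlb_dsi_flush_val(u16 did)
{
        return DMA_TLB_IVT |            /* invalidate IOTLB */
               DMA_TLB_DSI_FLUSH |      /* domain-selective granularity */
               DMA_TLB_READ_DRAIN |
               DMA_TLB_WRITE_DRAIN |
               DMA_TLB_DID(did);
}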

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
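
/*
 * Illustrative sketch: the matching value for a device-selective
 * context-cache invalidation of (domain "did", source-id "sid").  The helper
 * name is assumed for this example.
 */
static inline u64 example_ccmd_device_inval_val(u16 did, u16 sid)
{
        return DMA_CCMD_ICC |           /* invalidate context cache */
               DMA_CCMD_DEVICE_INVL |   /* device-selective granularity */
               DMA_CCMD_DID(did) |
               DMA_CCMD_SID(sid) |
               DMA_CCMD_FM(DMA_CCMD_MASK_NOBIT);
}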

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
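
/*
 * Illustrative sketch: applying the fault-record accessors above to the
 * upper two 32-bit words of a fault record.  The word-to-field split shown
 * here is an assumption made for the example, as are the names.
 */
static inline void example_decode_fault_record(u32 frcd_word2, u32 frcd_word3,
                                               u16 *sid, int *reason, int *type)
{
        *sid    = dma_frcd_source_id(frcd_word2);       /* requester source id */
        *reason = dma_frcd_fault_reason(frcd_word3);    /* encoded fault reason */
        *type   = dma_frcd_type(frcd_word3);            /* request type bit */
}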

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & PAGE_MASK_4K;
}

struct context_entry;
static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return (struct context_entry *)
                (root_present(root)?phys_to_virt(
                root->val & PAGE_MASK_4K):
                NULL);
}
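
/*
 * Illustrative sketch: populating one root-table slot so that it points at a
 * previously allocated page of context entries.  "ctx_table_phys" is an
 * assumed, page-aligned physical address supplied by the caller.
 */
static inline void example_fill_root_entry(struct root_entry *root,
                                           unsigned long ctx_table_phys)
{
        root->rsvd1 = 0;
        root->val = 0;
        set_root_value(root, ctx_table_phys);   /* bits 12-63: context table */
        set_root_present(root);                 /* bit 0: present */
}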

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};
#define context_present(c) ((c).lo & 1)
#define context_fault_disable(c) (((c).lo >> 1) & 1)
#define context_translation_type(c) (((c).lo >> 2) & 3)
#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
#define context_address_width(c) ((c).hi &  7)
#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))

#define context_set_present(c) do {(c).lo |= 1;} while (0)
#define context_set_fault_enable(c) \
        do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
#define context_set_translation_type(c, val) \
        do { \
                (c).lo &= (((u64)-1) << 4) | 3; \
                (c).lo |= ((val) & 3) << 2; \
        } while (0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define context_set_address_root(c, val) \
        do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
#define context_set_domain_id(c, val) \
        do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
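
/*
 * Illustrative sketch: programming a context entry with the setter macros
 * above.  "pgd_phys" (physical address of the page-table root), "width" and
 * "did" are assumed inputs, and the helper name is for the example only.
 */
static inline void example_fill_context_entry(struct context_entry *ce,
                                              u64 pgd_phys, int width, u16 did)
{
        context_clear_entry(*ce);
        context_set_domain_id(*ce, did);
        context_set_address_width(*ce, width);
        context_set_address_root(*ce, pgd_phys);
        context_set_translation_type(*ce, CONTEXT_TT_MULTI_LEVEL);
        context_set_present(*ce);
}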

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-11: available
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};
#define dma_clear_pte(p)        do {(p).val = 0;} while (0)

#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)

#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
#define dma_set_pte_prot(p, prot) \
                do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
#define dma_set_pte_addr(p, addr) do {\
                (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
#define dma_pte_present(p) (((p).val & 3) != 0)
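
/*
 * Illustrative sketch: building a leaf PTE that maps one 4K page read/write.
 * "page_phys" is an assumed page-aligned physical address, and the helper
 * name is for the example only.
 */
static inline void example_fill_pte(struct dma_pte *pte, u64 page_phys)
{
        dma_clear_pte(*pte);                                    /* start from zero */
        dma_set_pte_addr(*pte, page_phys);                      /* bits 12-63 */
        dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);   /* bits 0-1 */
}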

struct intel_iommu;

struct dmar_domain {
        int     id;                     /* domain id */
        struct intel_iommu *iommu;      /* back pointer to owning iommu */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        spinlock_t      mapping_lock;   /* page table lock */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
        int             flags;
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
        struct dmar_domain *domain; /* pointer to domain */
};

extern int init_dmars(void);

struct intel_iommu {
        void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
        u64             cap;
        u64             ecap;
        unsigned long   *domain_ids; /* bitmap of domains */
        struct dmar_domain **domains; /* ptr to domains */
        int             seg;
        u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
        spinlock_t      lock; /* protect context, domain ids */
        spinlock_t      register_lock; /* protect register handling */
        struct root_entry *root_entry; /* virtual address */

        unsigned int irq;
        unsigned char name[7];    /* Device Name */
        struct msi_msg saved_msg;
        struct sys_device sysdev;
};

#ifndef CONFIG_DMAR_GFX_WA
static inline void iommu_prepare_gfx_mapping(void)
{
        return;
}
#endif /* !CONFIG_DMAR_GFX_WA */

#endif
