/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 */

#ifndef _IOMMU_H_
#define _IOMMU_H_

#include <xen/mm-frame.h>
#include <xen/init.h>
#include <xen/page-defs.h>
#include <xen/pci.h>
#include <xen/spinlock.h>
#include <public/domctl.h>
#include <public/hvm/ioreq.h>
#include <asm/device.h>

TYPE_SAFE(uint64_t, dfn);
#define PRI_dfn     PRIx64
#define INVALID_DFN _dfn(~0ULL)

#ifndef dfn_t
#define dfn_t /* Grep fodder: dfn_t, _dfn() and dfn_x() are defined above */
#define _dfn
#define dfn_x
#undef dfn_t
#undef _dfn
#undef dfn_x
#endif

44 45 46 47 48
static inline dfn_t dfn_add(dfn_t dfn, unsigned long i)
{
    return _dfn(dfn_x(dfn) + i);
}

49 50 51 52 53
static inline bool_t dfn_eq(dfn_t x, dfn_t y)
{
    return dfn_x(x) == dfn_x(y);
}

#ifdef CONFIG_HAS_PASSTHROUGH
extern bool_t iommu_enable, iommu_enabled;
extern bool force_iommu, iommu_verbose;
/* Boolean except for the specific purposes of drivers/passthrough/iommu.c. */
extern uint8_t iommu_quarantine;
#else
#define iommu_enabled false
#endif

#ifdef CONFIG_X86
extern enum __packed iommu_intremap {
   /*
    * In order to allow traditional boolean uses of the iommu_intremap
    * variable, the "off" value has to come first (yielding a value of zero).
    */
   iommu_intremap_off,
   /*
    * Interrupt remapping enabled, but only able to generate interrupts
    * with an 8-bit APIC ID.
    */
   iommu_intremap_restricted,
   /* Interrupt remapping enabled without the 8-bit APIC ID restriction. */
   iommu_intremap_full,
} iommu_intremap;

extern bool iommu_igfx, iommu_qinval, iommu_snoop;
#else
# define iommu_intremap false
# define iommu_snoop false
#endif


#if defined(CONFIG_X86) && defined(CONFIG_HVM)
extern bool iommu_intpost;
#else
# define iommu_intpost false
#endif

#if defined(CONFIG_IOMMU_FORCE_PT_SHARE)
#define iommu_hap_pt_share true
#elif defined(CONFIG_HVM)
extern bool iommu_hap_pt_share;
#else
#define iommu_hap_pt_share false
#endif

/*
 * Force page-table sharing off for the rest of the session.  Only
 * meaningful when iommu_hap_pt_share is the run-time variable (i.e.
 * CONFIG_HVM without CONFIG_IOMMU_FORCE_PT_SHARE); a build that forces
 * sharing on at compile time must never reach this function.
 */
static inline void clear_iommu_hap_pt_share(void)
{
#ifndef iommu_hap_pt_share
    /* iommu_hap_pt_share is the extern bool: actually clear it. */
    iommu_hap_pt_share = false;
#elif iommu_hap_pt_share
    /* Sharing is compile-time forced on; asking to clear it is a bug. */
    ASSERT_UNREACHABLE();
#endif
}

extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;

extern bool iommu_hwdom_strict, iommu_hwdom_passthrough, iommu_hwdom_inclusive;
extern int8_t iommu_hwdom_reserved;

extern unsigned int iommu_dev_iotlb_timeout;

int iommu_setup(void);
int iommu_hardware_setup(void);

int iommu_domain_init(struct domain *d, unsigned int opts);
void iommu_hwdom_init(struct domain *d);
void iommu_domain_destroy(struct domain *d);

void arch_iommu_domain_destroy(struct domain *d);
int arch_iommu_domain_init(struct domain *d);
void arch_iommu_check_autotranslated_hwdom(struct domain *d);
void arch_iommu_hwdom_init(struct domain *d);

/*
 * The following flags are passed to map operations and passed by lookup
 * operations.
 */
#define _IOMMUF_readable 0
#define IOMMUF_readable  (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable  (1u<<_IOMMUF_writable)

/*
 * flush_flags:
 *
 * IOMMU_FLUSHF_added -> A new 'present' PTE has been inserted.
 * IOMMU_FLUSHF_modified -> An existing 'present' PTE has been modified
 *                          (whether the new PTE value is 'present' or not).
 *
 * These flags are passed back from map/unmap operations and passed into
 * flush operations.
 */
enum
{
    _IOMMU_FLUSHF_added,
    _IOMMU_FLUSHF_modified,
};
#define IOMMU_FLUSHF_added (1u << _IOMMU_FLUSHF_added)
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)

int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                           unsigned long page_count, unsigned int flags,
                           unsigned int *flush_flags);
int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
                             unsigned long page_count,
                             unsigned int *flush_flags);

int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                                  unsigned long page_count,
                                  unsigned int flags);
int __must_check iommu_legacy_unmap(struct domain *d, dfn_t dfn,
                                    unsigned long page_count);

int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                   unsigned int *flags);

int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
                                   unsigned long page_count,
                                   unsigned int flush_flags);
int __must_check iommu_iotlb_flush_all(struct domain *d,
                                       unsigned int flush_flags);

enum iommu_feature
{
    IOMMU_FEAT_COHERENT_WALK,
    IOMMU_FEAT_count
};

bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature);

#ifdef CONFIG_HAS_PCI
struct pirq;
int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
int pt_irq_create_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
int pt_irq_destroy_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);

void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci);

struct msi_desc;
struct msi_msg;

#define PT_IRQ_TIME_OUT MILLISECS(8)
#endif /* HAS_PCI */

#ifdef CONFIG_HAS_DEVICE_TREE
#include <xen/device_tree.h>

int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev);
int iommu_deassign_dt_device(struct domain *d, struct dt_device_node *dev);
int iommu_dt_domain_init(struct domain *d);
int iommu_release_dt_devices(struct domain *d);

/*
 * Helper to add master device to the IOMMU using generic IOMMU DT bindings.
 *
 * Return values:
 *  0 : device is protected by an IOMMU
 * <0 : device is not protected by an IOMMU, but must be (error condition)
 * >0 : device doesn't need to be protected by an IOMMU
 *      (IOMMU is not enabled/present or device is not connected to it).
 */
int iommu_add_dt_device(struct dt_device_node *np);

int iommu_do_dt_domctl(struct xen_domctl *, struct domain *,
                       XEN_GUEST_HANDLE_PARAM(xen_domctl_t));

#endif /* HAS_DEVICE_TREE */

struct page_info;

/*
 * Any non-zero value returned from callbacks of this type will cause the
 * function the callback was handed to terminate its iteration. Assigning
 * meaning of these non-zero values is left to the top level caller /
 * callback pair.
 */
typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt);

/*
 * Hook table implementing a particular IOMMU driver.  The framework
 * dispatches through these hooks via iommu_call()/iommu_vcall().
 */
struct iommu_ops {
    /* Per-domain construction of driver state. */
    int (*init)(struct domain *d);
    void (*hwdom_init)(struct domain *d);
    int (*quarantine_init)(struct domain *d);
    /* Device addition, enabling, and removal. */
    int (*add_device)(u8 devfn, device_t *dev);
    int (*enable_device)(device_t *dev);
    int (*remove_device)(u8 devfn, device_t *dev);
    /* Assign @dev to a domain. */
    int (*assign_device)(struct domain *, u8 devfn, device_t *dev, u32 flag);
    /* Move @dev from domain @s to domain @t. */
    int (*reassign_device)(struct domain *s, struct domain *t,
                           u8 devfn, device_t *dev);
#ifdef CONFIG_HAS_PCI
    int (*get_device_group_id)(u16 seg, u8 bus, u8 devfn);
#endif /* HAS_PCI */

    void (*teardown)(struct domain *d);

    /*
     * This block of operations must be appropriately locked against each
     * other by the caller in order to have meaningful results.
     */
    int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
                                 unsigned int flags,
                                 unsigned int *flush_flags);
    int __must_check (*unmap_page)(struct domain *d, dfn_t dfn,
                                   unsigned int *flush_flags);
    int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
                                    unsigned int *flags);

#ifdef CONFIG_X86
    int (*enable_x2apic)(void);
    void (*disable_x2apic)(void);

    /* Interrupt remapping entry maintenance for IO-APIC RTEs. */
    void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
    unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg);

    int (*setup_hpet_msi)(struct msi_desc *);

    int (*adjust_irq_affinities)(void);
    void (*sync_cache)(const void *addr, unsigned int size);
    void (*clear_root_pgtable)(struct domain *d);
    /* Interrupt remapping entry maintenance for MSIs. */
    int (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg);
#endif /* CONFIG_X86 */

    /* Power management and crash handling. */
    int __must_check (*suspend)(void);
    void (*resume)(void);
    void (*crash_shutdown)(void);
    /* IOTLB maintenance; flush_flags are IOMMU_FLUSHF_* values. */
    int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                    unsigned long page_count,
                                    unsigned int flush_flags);
    int __must_check (*iotlb_flush_all)(struct domain *d);
    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
    void (*dump_page_tables)(struct domain *d);

#ifdef CONFIG_HAS_DEVICE_TREE
    /*
     * All IOMMU drivers which support generic IOMMU DT bindings should use
     * this callback. This is a way for the framework to provide the driver
     * with DT IOMMU specifier which describes the IOMMU master interfaces of
     * that device (device IDs, etc).
     */
    int (*dt_xlate)(device_t *dev, const struct dt_phandle_args *args);
#endif
};

#include <asm/iommu.h>

#ifndef iommu_call
# define iommu_call(ops, fn, args...) ((ops)->fn(args))
# define iommu_vcall iommu_call
#endif

/* Per-domain IOMMU state; retrieved via the dom_iommu() accessor. */
struct domain_iommu {
    struct arch_iommu arch;

    /* Hook table of the IOMMU driver managing this domain (iommu_ops). */
    const struct iommu_ops *platform_ops;

#ifdef CONFIG_HAS_DEVICE_TREE
    /* List of DT devices assigned to this domain */
    struct list_head dt_devices;
#endif

#ifdef CONFIG_NUMA
    /* NUMA node to do IOMMU related allocations against. */
    nodeid_t node;
#endif

    /* Features supported by the IOMMU */
    DECLARE_BITMAP(features, IOMMU_FEAT_count);

    /* Does the guest share HAP mapping with the IOMMU? */
    bool hap_pt_share;

    /*
     * Does the guest require mappings to be synchronized, to maintain
     * the default dfn == pfn map? (See comment on dfn at the top of
     * include/xen/mm.h). Note that hap_pt_share == false does not
     * necessarily imply this is true.
     */
    bool need_sync;
};

#define dom_iommu(d)              (&(d)->iommu)
#define iommu_set_feature(d, f)   set_bit(f, dom_iommu(d)->features)
#define iommu_clear_feature(d, f) clear_bit(f, dom_iommu(d)->features)

/* Are we using the domain P2M table as its IOMMU pagetable? */
#define iommu_use_hap_pt(d)       (dom_iommu(d)->hap_pt_share)

/* Does the IOMMU pagetable need to be kept synchronized with the P2M */
#ifdef CONFIG_HAS_PASSTHROUGH
#define need_iommu_pt_sync(d)     (dom_iommu(d)->need_sync)
#else
#define need_iommu_pt_sync(d)     ({ (void)(d); false; })
#endif

int __must_check iommu_suspend(void);
void iommu_resume(void);
void iommu_crash_shutdown(void);
int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);

#ifdef CONFIG_HAS_PCI
int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
                        XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
#endif

int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                    XEN_GUEST_HANDLE_PARAM(xen_domctl_t));

void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev);

/*
 * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
 * avoid unnecessary iotlb_flush in the low level IOMMU code.
 *
 * iommu_map_page/iommu_unmap_page must flush the iotlb but sometimes
 * this operation can be really expensive. This flag will be set by the
 * caller to notify the low level IOMMU code to avoid the iotlb flushes.
 * iommu_iotlb_flush/iommu_iotlb_flush_all will be explicitly called by
 * the caller.
 */
DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb);

extern struct spinlock iommu_pt_cleanup_lock;
extern struct page_list_head iommu_pt_cleanup_list;

bool arch_iommu_use_permitted(const struct domain *d);

#ifdef CONFIG_X86
/*
 * Update the interrupt remapping entry backing @msi_desc with @msg.
 * A no-op (returning success) when interrupt remapping is not in use.
 */
static inline int iommu_update_ire_from_msi(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    if ( !iommu_intremap )
        return 0;

    return iommu_call(&iommu_ops, update_ire_from_msi, msi_desc, msg);
}
#endif

#endif /* _IOMMU_H_ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */