Commit a43d6abb authored by Keir Fraser

x86 iommu: Define vendor-neutral interface for access to IOMMU.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
parent 280e2b12
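
Before the hunks, a minimal, self-contained sketch of the pattern this commit introduces may help: a vendor-neutral struct iommu_ops of function pointers is chosen once per domain from the CPU vendor, and generic iommu_* wrappers forward through it to either the VT-d or the AMD backend. This is illustrative C only, not the Xen code itself; struct domain is stubbed, the backend bodies just print, and the enum and main() harness are invented for the example.

/* Illustrative sketch only -- NOT the Xen code. Types and backends are
 * stubbed so the dispatch pattern added by this commit is visible. */
#include <stdio.h>
#include <stddef.h>

struct domain;                                  /* stand-in for Xen's struct domain */

struct iommu_ops {
    int (*init)(struct domain *d);
    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
    int (*unmap_page)(struct domain *d, unsigned long gfn);
};

struct domain {
    const struct iommu_ops *platform_ops;       /* selected once at domain init */
};

/* Stub "VT-d" backend (hypothetical bodies, names modelled on the patch). */
static int intel_init(struct domain *d) { (void)d; puts("VT-d: init"); return 0; }
static int intel_map(struct domain *d, unsigned long gfn, unsigned long mfn)
{ (void)d; printf("VT-d: map gfn %#lx -> mfn %#lx\n", gfn, mfn); return 0; }
static int intel_unmap(struct domain *d, unsigned long gfn)
{ (void)d; printf("VT-d: unmap gfn %#lx\n", gfn); return 0; }
static const struct iommu_ops intel_iommu_ops = { intel_init, intel_map, intel_unmap };

/* Stub "AMD" backend. */
static int amd_init(struct domain *d) { (void)d; puts("AMD: init"); return 0; }
static int amd_map(struct domain *d, unsigned long gfn, unsigned long mfn)
{ (void)d; printf("AMD: map gfn %#lx -> mfn %#lx\n", gfn, mfn); return 0; }
static int amd_unmap(struct domain *d, unsigned long gfn)
{ (void)d; printf("AMD: unmap gfn %#lx\n", gfn); return 0; }
static const struct iommu_ops amd_iommu_ops = { amd_init, amd_map, amd_unmap };

enum x86_vendor { VENDOR_INTEL, VENDOR_AMD };

/* Generic layer: pick the backend from the CPU vendor once, then always
 * dispatch through the per-domain ops pointer. */
static int iommu_domain_init(struct domain *d, enum x86_vendor vendor)
{
    d->platform_ops = (vendor == VENDOR_INTEL) ? &intel_iommu_ops
                                               : &amd_iommu_ops;
    return d->platform_ops->init(d);
}

static int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    if ( d->platform_ops == NULL )   /* mirrors the !iommu_enabled/!platform_ops checks */
        return 0;
    return d->platform_ops->map_page(d, gfn, mfn);
}

static int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    if ( d->platform_ops == NULL )
        return 0;
    return d->platform_ops->unmap_page(d, gfn);
}

int main(void)
{
    struct domain dom = { NULL };
    iommu_domain_init(&dom, VENDOR_AMD);
    iommu_map_page(&dom, 0x1000, 0x2000);
    iommu_unmap_page(&dom, 0x1000);
    return 0;
}

In the actual patch the same selection happens in iommu_domain_init() from boot_cpu_data.x86_vendor, the pointer lives in struct hvm_iommu as platform_ops, and intel_iommu_ops/amd_iommu_ops are the real per-vendor tables, as the hunks below show.
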
@@ -530,7 +530,7 @@ long arch_do_domctl(
u8 bus, devfn;
ret = -EINVAL;
if ( !vtd_enabled )
if ( !iommu_enabled )
break;
bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
@@ -553,7 +553,7 @@ long arch_do_domctl(
u8 bus, devfn;
ret = -EINVAL;
if ( !vtd_enabled )
if ( !iommu_enabled )
break;
if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
@@ -589,9 +589,9 @@ long arch_do_domctl(
if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
break;
bind = &(domctl->u.bind_pt_irq);
if (vtd_enabled)
if ( iommu_enabled )
ret = pt_irq_create_bind_vtd(d, bind);
if (ret < 0)
if ( ret < 0 )
gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
rcu_unlock_domain(d);
}
......
@@ -6,6 +6,7 @@ obj-y += i8254.o
obj-y += instrlen.o
obj-y += intercept.o
obj-y += io.o
obj-y += iommu.o
obj-y += irq.o
obj-y += mtrr.o
obj-y += platform.o
......
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include <xen/init.h>
#include <xen/irq.h>
#include <xen/spinlock.h>
#include <xen/sched.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/delay.h>
#include <asm/string.h>
#include <asm/mm.h>
#include <asm/iommu.h>
#include <asm/hvm/vmx/intel-iommu.h>
extern struct iommu_ops intel_iommu_ops;
extern struct iommu_ops amd_iommu_ops;
int iommu_domain_init(struct domain *domain)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
spin_lock_init(&hd->mapping_lock);
spin_lock_init(&hd->iommu_list_lock);
INIT_LIST_HEAD(&hd->pdev_list);
INIT_LIST_HEAD(&hd->g2m_ioport_list);
if ( !iommu_enabled )
return 0;
switch ( boot_cpu_data.x86_vendor )
{
case X86_VENDOR_INTEL:
hd->platform_ops = &intel_iommu_ops;
break;
case X86_VENDOR_AMD:
hd->platform_ops = &amd_iommu_ops;
break;
default:
BUG();
}
return hd->platform_ops->init(domain);
}
int assign_device(struct domain *d, u8 bus, u8 devfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops)
return 0;
return hd->platform_ops->assign_device(d, bus, devfn);
}
void iommu_domain_destroy(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t i;
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct list_head *ioport_list, *digl_list, *tmp;
struct g2m_ioport *ioport;
struct dev_intx_gsi_link *digl;
if ( !iommu_enabled || !hd->platform_ops)
return;
if ( hvm_irq_dpci != NULL )
{
for ( i = 0; i < NR_IRQS; i++ )
{
if ( !hvm_irq_dpci->mirq[i].valid )
continue;
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[i].digl_list )
{
digl = list_entry(digl_list,
struct dev_intx_gsi_link, list);
list_del(&digl->list);
xfree(digl);
}
}
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
if ( hd )
{
list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
{
ioport = list_entry(ioport_list, struct g2m_ioport, list);
list_del(&ioport->list);
xfree(ioport);
}
}
return hd->platform_ops->teardown(d);
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops)
return 0;
return hd->platform_ops->map_page(d, gfn, mfn);
}
int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops)
return 0;
return hd->platform_ops->unmap_page(d, gfn);
}
@@ -89,12 +89,14 @@ int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
u32 cap_header, cap_range;
u64 mmio_bar;
#if HACK_BIOS_SETTINGS
/* remove it when BIOS available */
write_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
write_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
/* remove it when BIOS available */
#endif
mmio_bar = (u64)read_pci_config(bus, dev, func,
cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
......
@@ -94,6 +94,46 @@ static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
}
static void svm_dirq_assist(struct vcpu *v)
{
unsigned int irq;
uint32_t device, intx;
struct domain *d = v->domain;
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct dev_intx_gsi_link *digl;
if ( !amd_iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
return;
for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
irq < NR_IRQS;
irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
{
stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
clear_bit(irq, &hvm_irq_dpci->dirq_mask);
list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
{
device = digl->device;
intx = digl->intx;
hvm_pci_intx_assert(d, device, intx);
spin_lock(&hvm_irq_dpci->dirq_lock);
hvm_irq_dpci->mirq[irq].pending++;
spin_unlock(&hvm_irq_dpci->dirq_lock);
}
/*
* Set a timer to see if the guest can finish the interrupt or not. For
* example, the guest OS may unmask the PIC during boot, before the
* guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
* guest will never deal with the irq, then the physical interrupt line
* will never be deasserted.
*/
set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
NOW() + PT_IRQ_TIME_OUT);
}
}
asmlinkage void svm_intr_assist(void)
{
struct vcpu *v = current;
@@ -102,6 +142,7 @@ asmlinkage void svm_intr_assist(void)
/* Crank the handle on interrupt state. */
pt_update_irq(v);
svm_dirq_assist(v);
do {
intack = hvm_vcpu_has_pending_irq(v);
......
@@ -458,7 +458,7 @@ void vioapic_update_EOI(struct domain *d, int vector)
ent->fields.remote_irr = 0;
if ( vtd_enabled )
if ( iommu_enabled )
{
spin_unlock(&d->arch.hvm_domain.irq_lock);
hvm_dpci_eoi(current->domain, gsi, ent);
......
@@ -1047,7 +1047,7 @@ static void free_iommu(struct iommu *iommu)
agaw = 64; \
agaw; })
int iommu_domain_init(struct domain *domain)
int intel_iommu_domain_init(struct domain *domain)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
struct iommu *iommu = NULL;
@@ -1056,11 +1056,6 @@ int iommu_domain_init(struct domain *domain)
unsigned long sagaw;
struct acpi_drhd_unit *drhd;
spin_lock_init(&hd->mapping_lock);
spin_lock_init(&hd->iommu_list_lock);
INIT_LIST_HEAD(&hd->pdev_list);
INIT_LIST_HEAD(&hd->g2m_ioport_list);
if ( !vtd_enabled || list_empty(&acpi_drhd_units) )
return 0;
@@ -1550,7 +1545,8 @@ static int domain_context_mapped(struct pci_dev *pdev)
return 0;
}
int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
int intel_iommu_map_page(
struct domain *d, unsigned long gfn, unsigned long mfn)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -1566,12 +1562,12 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
return 0;
#endif
pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
pg = addr_to_dma_page(d, (paddr_t)gfn << PAGE_SHIFT_4K);
if ( !pg )
return -ENOMEM;
pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
pte += gfn & LEVEL_MASK;
dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
iommu_flush_cache_entry(iommu, pte);
unmap_domain_page(pte);
@@ -1581,7 +1577,7 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
gfn << PAGE_SHIFT_4K, 1, 0);
(paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
}
@@ -1589,7 +1585,7 @@ int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
return 0;
}
int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -1603,12 +1599,12 @@ int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
return 0;
#endif
dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);
dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
return 0;
}
int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
int iommu_page_mapping(struct domain *domain, paddr_t iova,
void *hpa, size_t size, int prot)
{
struct acpi_drhd_unit *drhd;
@@ -1655,14 +1651,14 @@ int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
return 0;
}
int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size)
int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
{
dma_pte_clear_range(domain, addr, addr + size);
return 0;
}
void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu = NULL;
@@ -1673,7 +1669,7 @@ void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
iommu = drhd->iommu;
if ( cap_caching_mode(iommu->cap) )
iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
gfn << PAGE_SHIFT_4K, 1, 0);
(paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
else if ( cap_rwbf(iommu->cap) )
iommu_flush_write_buffer(iommu);
}
@@ -1921,7 +1917,7 @@ int device_assigned(u8 bus, u8 devfn)
return 1;
}
int assign_device(struct domain *d, u8 bus, u8 devfn)
int intel_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
struct acpi_rmrr_unit *rmrr;
struct pci_dev *pdev;
@@ -2151,6 +2147,14 @@ int iommu_resume(void)
return 0;
}
struct iommu_ops intel_iommu_ops = {
.init = intel_iommu_domain_init,
.assign_device = intel_iommu_assign_device,
.teardown = iommu_domain_teardown,
.map_page = intel_iommu_map_page,
.unmap_page = intel_iommu_unmap_page,
};
/*
* Local variables:
* mode: C
......
@@ -141,7 +141,7 @@ int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
if ( !vtd_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) ||
!hvm_irq->dpci->mirq[mirq].valid )
return 0;
@@ -167,7 +167,7 @@ static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
int i;
ASSERT(isairq < NR_ISAIRQS);
if ( !vtd_enabled || !dpci ||
if ( !iommu_enabled || !dpci ||
!test_bit(isairq, dpci->isairq_map) )
return;
@@ -205,7 +205,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t device, intx, machine_gsi;
if ( !vtd_enabled || (hvm_irq_dpci == NULL) ||
if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
(guest_gsi >= NR_ISAIRQS &&
!hvm_irq_dpci->girq[guest_gsi].valid) )
return;
@@ -235,50 +235,3 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
else
spin_unlock(&hvm_irq_dpci->dirq_lock);
}
void iommu_domain_destroy(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
uint32_t i;
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct list_head *ioport_list, *digl_list, *tmp;
struct g2m_ioport *ioport;
struct dev_intx_gsi_link *digl;
if ( !vtd_enabled )
return;
if ( hvm_irq_dpci != NULL )
{
for ( i = 0; i < NR_IRQS; i++ )
if ( hvm_irq_dpci->mirq[i].valid )
{
pirq_guest_unbind(d, i);
kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[i].digl_list )
{
digl = list_entry(digl_list,
struct dev_intx_gsi_link, list);
list_del(&digl->list);
xfree(digl);
}
}
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
if ( hd )
{
list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
{
ioport = list_entry(ioport_list, struct g2m_ioport, list);
list_del(&ioport->list);
xfree(ioport);
}
}
iommu_domain_teardown(d);
}
@@ -255,8 +255,21 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
/* level 1 entry */
paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
iommu_flush(d, gfn, (u64*)p2m_entry);
if ( iommu_enabled && is_hvm_domain(d) )
{
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
{
if ( (p2mt == p2m_mmio_direct) )
iommu_flush(d, gfn, (u64*)p2m_entry);
}
else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
{
if ( p2mt == p2m_ram_rw )
iommu_map_page(d, gfn, mfn_x(mfn));
else
iommu_unmap_page(d, gfn);
}
}
/* Success */
rv = 1;
......
@@ -48,6 +48,9 @@ struct hvm_iommu {
int domain_id;
int paging_mode;
void *root_table;
/* iommu_ops */
struct iommu_ops *platform_ops;
};
#endif // __ASM_X86_HVM_IOMMU_H__
@@ -263,6 +263,10 @@
#define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK 0xFFFFFFFF
#define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT 0
/* INVALIDATE_DEVTAB_ENTRY command */
#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK 0x0000FFFF
#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT 0
/* Event Log */
#define IOMMU_EVENT_LOG_BASE_LOW_OFFSET 0x10
#define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET 0x14
@@ -415,5 +419,6 @@
#define IOMMU_PAGE_TABLE_LEVEL_4 4
#define IOMMU_IO_WRITE_ENABLED 1
#define IOMMU_IO_READ_ENABLED 1
#define HACK_BIOS_SETTINGS 0
#endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
@@ -27,13 +27,15 @@
list_for_each_entry(amd_iommu, \
&amd_iommu_head, list)
#define for_each_pdev(domain, pdev) \
list_for_each_entry(pdev, \
&(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_SHIFT_4K (12)
#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
typedef int (*iommu_detect_callback_ptr_t)(u8 bus, u8 dev, u8 func, u8 cap_ptr);
typedef int (*iommu_detect_callback_ptr_t)(
u8 bus, u8 dev, u8 func, u8 cap_ptr);
/* amd-iommu-detect functions */
int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback);
@@ -49,16 +51,20 @@ void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu);
void __init enable_iommu(struct amd_iommu *iommu);
/* mapping functions */
int amd_iommu_map_page(struct domain *d, unsigned long gfn,
unsigned long mfn);
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
/* device table functions */
void amd_iommu_set_dev_table_entry(u32 *dte,
u64 root_ptr, u16 domain_id, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
void invalidate_dev_table_entry(struct amd_iommu *iommu,
u16 devic_id);
/* send cmd to iommu */
int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
void flush_command_buffer(struct amd_iommu *iommu);
/* iommu domain functions */
int amd_iommu_domain_init(struct domain *domain);
......
@@ -422,8 +422,6 @@ struct poll_info {
#define VTD_PAGE_TABLE_LEVEL_3 3
#define VTD_PAGE_TABLE_LEVEL_4 4
typedef paddr_t dma_addr_t;
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_IOMMUS 32
#define MAX_IOMMU_REGS 0xc0
@@ -447,8 +445,10 @@ struct ir_ctrl {
};
struct iommu_flush {
int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
int (*context)(void *iommu, u16 did, u16 source_id,
u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
u64 type, int non_present_entry_flush);
};
struct intel_iommu {
......
@@ -28,7 +28,9 @@
#include <public/domctl.h>
extern int vtd_enabled;
extern int amd_iommu_enabled;
#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
#define domain_hvm_iommu(d) (&d->arch.hvm_domain.hvm_iommu)
#define domain_vmx_iommu(d) (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
#define iommu_qi_ctrl(iommu) (&(iommu->intel.qi_ctrl));
@@ -72,9 +74,9 @@ int iommu_domain_init(struct domain *d);
void iommu_domain_destroy(struct domain *d);
int device_assigned(u8 bus, u8 devfn);
int assign_device(struct domain *d, u8 bus, u8 devfn);
int iommu_map_page(struct domain *d, dma_addr_t gfn, dma_addr_t mfn);
int iommu_unmap_page(struct domain *d, dma_addr_t gfn);
void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry);
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int iommu_unmap_page(struct domain *d, unsigned long gfn);
void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
void iommu_set_pgd(struct domain *d);
void iommu_domain_teardown(struct domain *d);
int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
@@ -89,4 +91,12 @@ void io_apic_write_remap_rte(unsigned int apic,
#define PT_IRQ_TIME_OUT MILLISECS(8)
#define VTDPREFIX "[VT-D]"
struct iommu_ops {
int (*init)(struct domain *d);
int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
void (*teardown)(struct domain *d);
int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
int (*unmap_page)(struct domain *d, unsigned long gfn);
};
#endif /* _IOMMU_H_ */