sysutils/bhyve+: remove unused patch

Rene Ladan 2021-10-17 22:50:32 +02:00
parent 1edfb96d8f
commit 93566bfca3

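For context, the patch deleted below let bhyve's PCI passthrough code fall back to a raw page mapping whenever a guest access landed past the emulated MSI-X table entries. The following is a minimal, compile-only sketch of that pattern, not the removed patch itself; the struct, helper names, and the memfd/offset parameters are assumptions for illustration, not bhyve's actual interfaces.

/*
 * Sketch of the fallback the removed patch implemented: map the BAR page
 * that backs the MSI-X table and service accesses that fall past the
 * table entries directly from that raw mapping.
 */
#include <sys/types.h>
#include <sys/mman.h>

#include <err.h>
#include <stdint.h>

#define TABLE_PAGE_SIZE 4096

struct msix_shadow {
	void *table_page;	/* raw mapping of the page holding the MSI-X table */
};

/* Map one page of the passthrough BAR at bar_page_off (hypothetical memfd). */
static int
map_table_page(struct msix_shadow *ms, int memfd, off_t bar_page_off)
{
	ms->table_page = mmap(NULL, TABLE_PAGE_SIZE, PROT_READ | PROT_WRITE,
	    MAP_SHARED, memfd, bar_page_off);
	if (ms->table_page == MAP_FAILED) {
		warn("failed to map MSI-X table page");
		ms->table_page = NULL;
		return (-1);
	}
	return (0);
}

/*
 * Fallback read for an offset beyond the emulated MSI-X entries but still
 * inside the mapped page; returns all-ones on error, as reads of
 * unimplemented PCI space conventionally do.
 */
static uint64_t
fallback_read(const struct msix_shadow *ms, uint64_t offset, int size)
{
	uint8_t *base;

	if (ms->table_page == NULL || offset + (uint64_t)size > TABLE_PAGE_SIZE)
		return ((uint64_t)-1);

	base = (uint8_t *)ms->table_page + offset;
	switch (size) {
	case 1:
		return (*(volatile uint8_t *)base);
	case 2:
		return (*(volatile uint16_t *)base);
	case 4:
		return (*(volatile uint32_t *)base);
	case 8:
		return (*(volatile uint64_t *)base);
	default:
		return ((uint64_t)-1);
	}
}

The size switch mirrors the readbar/writebar fallback the patch added to msix_table_read() and msix_table_write(); a write path would dispatch on size the same way and store into the mapped page instead.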

@@ -1,132 +0,0 @@
--- usr.sbin/bhyve/pci_emul.h.orig 2021-08-19 23:00:57 UTC
+++ usr.sbin/bhyve/pci_emul.h
@@ -146,6 +146,7 @@ struct pci_devinst {
struct msix_table_entry *table; /* allocated at runtime */
void *pba_page;
int pba_page_offset;
+ void *table_page;
} pi_msix;
void *pi_arg; /* devemu-private data */
--- usr.sbin/bhyve/pci_passthru.c.orig 2021-08-19 23:00:57 UTC
+++ usr.sbin/bhyve/pci_passthru.c
@@ -324,13 +324,14 @@ msix_table_read(struct passthru_softc *sc, uint64_t of
return (data);
}
+ /* Should make this an assert. */
if (offset < pi->pi_msix.table_offset)
return (-1);
offset -= pi->pi_msix.table_offset;
index = offset / MSIX_TABLE_ENTRY_SIZE;
if (index >= pi->pi_msix.table_count)
- return (-1);
+ goto readbar;
entry = &pi->pi_msix.table[index];
entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;
@@ -357,6 +358,33 @@ msix_table_read(struct passthru_softc *sc, uint64_t of
}
return (data);
+
+readbar:
+ if (pi->pi_msix.table_page != NULL && offset < 4096) {
+ switch(size) {
+ case 1:
+ src8 = (uint8_t *)(pi->pi_msix.table_page + offset);
+ data = *src8;
+ break;
+ case 2:
+ src16 = (uint16_t *)(pi->pi_msix.table_page + offset);
+ data = *src16;
+ break;
+ case 4:
+ src32 = (uint32_t *)(pi->pi_msix.table_page + offset);
+ data = *src32;
+ break;
+ case 8:
+ src64 = (uint64_t *)(pi->pi_msix.table_page + offset);
+ data = *src64;
+ break;
+ default:
+ return (-1);
+ }
+ return (data);
+ }
+
+ return (-1);
}
static void
@@ -403,13 +431,14 @@ msix_table_write(struct vmctx *ctx, int vcpu, struct p
return;
}
+ /* Should make this an assert. */
if (offset < pi->pi_msix.table_offset)
return;
offset -= pi->pi_msix.table_offset;
index = offset / MSIX_TABLE_ENTRY_SIZE;
if (index >= pi->pi_msix.table_count)
- return;
+ goto writebar;
entry = &pi->pi_msix.table[index];
entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;
@@ -432,6 +461,31 @@ msix_table_write(struct vmctx *ctx, int vcpu, struct p
entry->msg_data, entry->vector_control);
}
}
+
+writebar:
+ if (pi->pi_msix.table_page != NULL && offset < 4096) {
+ switch(size) {
+ case 1:
+ dest8 = (uint8_t *)(pi->pi_msix.table_page + offset);
+ *dest8 = data;
+ break;
+ case 2:
+ dest16 = (uint16_t *)(pi->pi_msix.table_page + offset);
+ *dest16 = data;
+ break;
+ case 4:
+ dest32 = (uint32_t *)(pi->pi_msix.table_page + offset);
+ *dest32 = data;
+ break;
+ case 8:
+ dest64 = (uint64_t *)(pi->pi_msix.table_page + offset);
+ *dest64 = data;
+ break;
+ default:
+ break;
+ }
+ return;
+ }
}
static int
@@ -466,6 +520,21 @@ init_msix_table(struct vmctx *ctx, struct passthru_sof
idx = pi->pi_msix.table_bar;
start = pi->pi_bar[idx].addr;
remaining = pi->pi_bar[idx].size;
+
+ /*
+ * Some devices (contrary to what the spec documents) map other
+ * usable address space into the same page as the end of the
+ * MSI-X table; at least the Intel AX200 apparently is one of
+ * them.
+ * Map the page and fall back to it for any reads/writes outside
+ * the MSI-X table in msix_table_{read,write}.
+ */
+ pi->pi_msix.table_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ MAP_SHARED, memfd, sc->psc_bar[idx].addr + table_offset);
+ if (pi->pi_msix.table_page == MAP_FAILED) {
+ warn("Failed to map table page for MSI-X on %d/%d/%d", b, s, f);
+ return (-1);
+ }
if (pi->pi_msix.pba_bar == pi->pi_msix.table_bar) {
pba_offset = pi->pi_msix.pba_offset;