author     Suren A. Chilingaryan <csa@suren.me>    2015-11-20 14:24:17 +0100
committer  Suren A. Chilingaryan <csa@suren.me>    2015-11-20 14:24:17 +0100
commit     cc5107c6d3db46ba213db974c630fc0fa1b89d68 (patch)
tree       6c16375da1d5c7c5d4d853c5e0ba7775bbc99c8a
parent     517ecf828e60e1e364c3ab6e67c2acd8a3c1b0c1 (diff)
Configure the number of DMA buffers in IPEDMA and improve checking and reporting of inconsistent kmem buffers on re-use
-rw-r--r--  dma/ipe.c        | 126
-rw-r--r--  driver/ioctl.c   |  10
-rw-r--r--  driver/kmem.c    |   8
-rw-r--r--  pcilib/kmem.c    | 152
-rw-r--r--  pcilib/pcilib.h  |   3
-rw-r--r--  pcitool/cli.c    |   6
6 files changed, 193 insertions(+), 112 deletions(-)
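The ring size is now taken from the dma_pages register in the dmaconf bank (falling back to IPEDMA_DMA_PAGES when the register is missing or zero), so it can be set from client code before the engine starts. A minimal sketch, not taken from this commit, assuming pcilib_write_register() mirrors the pcilib_read_register() call used in dma_ipe_start() below, that the pcilib.h header is installed as such, and that engine id 0 is the one of interest:

#include <pcilib.h>

/* Request a 64-page ring instead of the compiled-in default; dma_ipe_start()
 * reads "dmaconf"/"dma_pages" into ctx->ring_size on the next start. */
static int start_with_custom_ring(pcilib_t *pci) {
    if (pcilib_write_register(pci, "dmaconf", "dma_pages", 64))   /* assumed write counterpart of the read call */
        return -1;
    return pcilib_start_dma(pci, 0 /* hypothetical engine id */, 0);
}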
diff --git a/dma/ipe.c b/dma/ipe.c
index 320955c..2c3f32c 100644
--- a/dma/ipe.c
+++ b/dma/ipe.c
@@ -110,6 +110,24 @@ void dma_ipe_free(pcilib_dma_context_t *vctx) {
}
+static void dma_ipe_disable(ipe_dma_t *ctx) {
+ // Disable DMA
+ WR(IPEDMA_REG_CONTROL, 0x0);
+ usleep(IPEDMA_RESET_DELAY);
+
+ // Reset DMA engine
+ WR(IPEDMA_REG_RESET, 0x1);
+ usleep(IPEDMA_RESET_DELAY);
+ WR(IPEDMA_REG_RESET, 0x0);
+ usleep(IPEDMA_RESET_DELAY);
+
+ // Resetting configured DMA pages
+ if (ctx->version < 3) {
+ WR(IPEDMA_REG2_PAGE_COUNT, 0);
+ }
+ usleep(IPEDMA_RESET_DELAY);
+}
+
int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
int err;
int mask = 32;
@@ -169,10 +187,10 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
} else
ctx->page_size = IPEDMA_PAGE_SIZE;
- if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_pages", &value))
- ctx->dma_pages = value;
+ if ((!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_pages", &value))&&(value > 0))
+ ctx->ring_size = value;
else
- ctx->dma_pages = IPEDMA_DMA_PAGES;
+ ctx->ring_size = IPEDMA_DMA_PAGES;
if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "ipedma_flags", &value))
ctx->dma_flags = value;
@@ -191,35 +209,56 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
kflags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ctx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
pcilib_kmem_handle_t *desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, IPEDMA_DESCRIPTOR_SIZE, IPEDMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags);
- pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, IPEDMA_DMA_PAGES, ctx->page_size, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
+ pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, ctx->ring_size, ctx->page_size, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
if (!desc||!pages) {
- if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, 0);
- if (desc) pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, 0);
+ if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, KMEM_FLAG_REUSE);
+ if (desc) pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, KMEM_FLAG_REUSE);
+ pcilib_error("Can't allocate required kernel memory for IPEDMA engine (%lu pages of %lu bytes + %lu byte descriptor)", ctx->ring_size, ctx->page_size, (unsigned long)IPEDMA_DESCRIPTOR_SIZE);
return PCILIB_ERROR_MEMORY;
}
reuse_desc = pcilib_kmem_is_reused(ctx->dmactx.pcilib, desc);
reuse_pages = pcilib_kmem_is_reused(ctx->dmactx.pcilib, pages);
- if (reuse_desc == reuse_pages) {
- if (reuse_desc & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers are found (only part of required buffers is available), reinitializing...");
- else if (reuse_desc & PCILIB_KMEM_REUSE_REUSED) {
- if ((reuse_desc & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
- else if ((reuse_desc & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
+ if ((reuse_pages & PCILIB_KMEM_REUSE_PARTIAL)||(reuse_desc & PCILIB_KMEM_REUSE_PARTIAL)) {
+ dma_ipe_disable(ctx);
+
+ pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, KMEM_FLAG_REUSE);
+ pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, KMEM_FLAG_REUSE);
+
+ if ((flags&PCILIB_DMA_FLAG_STOP) == 0) {
+ pcilib_error("Inconsistent DMA buffers are found (buffers are only partially re-used). This is very wrong, please stop DMA engine and correct configuration...");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ pcilib_warning("Inconsistent DMA buffers are found (buffers are only partially re-used), reinitializing...");
+ desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, IPEDMA_DESCRIPTOR_SIZE, IPEDMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags|PCILIB_KMEM_FLAG_MASS);
+ pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, ctx->ring_size, ctx->page_size, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags|PCILIB_KMEM_FLAG_MASS);
+
+ if (!desc||!pages) {
+ if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, KMEM_FLAG_REUSE);
+ if (desc) pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, KMEM_FLAG_REUSE);
+ return PCILIB_ERROR_MEMORY;
+ }
+ } else if (reuse_desc != reuse_pages) {
+ pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers does not match), reinitializing....");
+ } else if (reuse_desc & PCILIB_KMEM_REUSE_REUSED) {
+ if ((reuse_desc & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
+ else if ((reuse_desc & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
+ else {
+ if (ctx->streaming)
+ preserve = 1;
else {
- if (ctx->streaming)
+ RD(IPEDMA_REG2_PAGE_COUNT, value);
+
+ if (value != ctx->ring_size)
+ pcilib_warning("Inconsistent DMA buffers are found (Number of allocated buffers (%lu) does not match current request (%lu)), reinitializing...", value + 1, IPEDMA_DMA_PAGES);
+ else
preserve = 1;
- else {
- RD(IPEDMA_REG2_PAGE_COUNT, value);
-
- if (value != IPEDMA_DMA_PAGES)
- pcilib_warning("Inconsistent DMA buffers are found (Number of allocated buffers (%lu) does not match current request (%lu)), reinitializing...", value + 1, IPEDMA_DMA_PAGES);
- else
- preserve = 1;
- }
- }
+ }
}
- } else pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers does not match), reinitializing....");
+ }
desc_va = pcilib_kmem_get_ua(ctx->dmactx.pcilib, desc);
if (ctx->version < 3) {
@@ -242,7 +281,7 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
RD(ctx->reg_last_read, value);
// Numbered from 1 in FPGA
# ifdef IPEDMA_BUG_LAST_READ
- if (value == IPEDMA_DMA_PAGES)
+ if (value == ctx->ring_size)
value = 0;
# else /* IPEDMA_BUG_LAST_READ */
value--;
@@ -251,16 +290,8 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
ctx->last_read = value;
} else {
ctx->reused = 0;
-
- // Disable DMA
- WR(IPEDMA_REG_CONTROL, 0x0);
- usleep(IPEDMA_RESET_DELAY);
- // Reset DMA engine
- WR(IPEDMA_REG_RESET, 0x1);
- usleep(IPEDMA_RESET_DELAY);
- WR(IPEDMA_REG_RESET, 0x0);
- usleep(IPEDMA_RESET_DELAY);
+ dma_ipe_disable(ctx);
// Verify PCIe link status
RD(IPEDMA_REG_RESET, value);
@@ -284,9 +315,9 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
// Setting current read position and configuring progress register
#ifdef IPEDMA_BUG_LAST_READ
- WR(ctx->reg_last_read, IPEDMA_DMA_PAGES - 1);
+ WR(ctx->reg_last_read, ctx->ring_size - 1);
#else /* IPEDMA_BUG_LAST_READ */
- WR(ctx->reg_last_read, IPEDMA_DMA_PAGES);
+ WR(ctx->reg_last_read, ctx->ring_size);
#endif /* IPEDMA_BUG_LAST_READ */
if (ctx->version < 3) {
@@ -304,7 +335,7 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
// In ring buffer mode, the hardware taking care to preserve an empty buffer to help distinguish between
// completely empty and completely full cases. In streaming mode, it is our responsibility to track this
// information. Therefore, we always keep the last buffer free
- num_pages = IPEDMA_DMA_PAGES;
+ num_pages = ctx->ring_size;
if (ctx->streaming) num_pages--;
for (i = 0; i < num_pages; i++) {
@@ -330,7 +361,7 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
// Enable DMA
WR(IPEDMA_REG_CONTROL, 0x1);
- ctx->last_read = IPEDMA_DMA_PAGES - 1;
+ ctx->last_read = ctx->ring_size - 1;
}
ctx->last_read_addr = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, pages, ctx->last_read);
@@ -338,8 +369,6 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
ctx->desc = desc;
ctx->pages = pages;
- ctx->ring_size = IPEDMA_DMA_PAGES;
-
return 0;
}
@@ -364,22 +393,7 @@ int dma_ipe_stop(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma
ctx->started = 0;
- // Disable DMA
- WR(IPEDMA_REG_CONTROL, 0);
- usleep(IPEDMA_RESET_DELAY);
-
- // Reset DMA engine
- WR(IPEDMA_REG_RESET, 0x1);
- usleep(IPEDMA_RESET_DELAY);
- WR(IPEDMA_REG_RESET, 0x0);
- usleep(IPEDMA_RESET_DELAY);
-
- // Reseting configured DMA pages
- if (ctx->version < 3) {
- WR(IPEDMA_REG2_PAGE_COUNT, 0);
- }
-
- usleep(IPEDMA_RESET_DELAY);
+ dma_ipe_disable(ctx);
}
// Clean buffers
@@ -633,7 +647,7 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
size_t last_free;
// We always keep 1 buffer free to distinguish between completely full and empty cases
if (cur_read) last_free = cur_read - 1;
- else last_free = IPEDMA_DMA_PAGES - 1;
+ else last_free = ctx->ring_size - 1;
uintptr_t buf_ba = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, ctx->pages, last_free);
if (ctx->version < 3) {
@@ -651,7 +665,7 @@ int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uin
// Numbered from 1
#ifdef IPEDMA_BUG_LAST_READ
- WR(ctx->reg_last_read, cur_read?cur_read:IPEDMA_DMA_PAGES);
+ WR(ctx->reg_last_read, cur_read?cur_read:ctx->ring_size);
#else /* IPEDMA_BUG_LAST_READ */
WR(ctx->reg_last_read, cur_read + 1);
#endif /* IPEDMA_BUG_LAST_READ */
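The hunks above replace the compile-time IPEDMA_DMA_PAGES with the run-time ctx->ring_size everywhere, including the rule that one page is always kept free so a completely full ring can be told from a completely empty one. A standalone sketch of that wrap-around rule (an illustrative helper, not pcilib API):

/* The block preceding the current read position is the one kept free,
 * wrapping to the last page of the ring when reading from page 0. */
static size_t last_free_page(size_t cur_read, size_t ring_size) {
    return cur_read ? cur_read - 1 : ring_size - 1;
}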
diff --git a/driver/ioctl.c b/driver/ioctl.c
index 0814fc8..26618c0 100644
--- a/driver/ioctl.c
+++ b/driver/ioctl.c
@@ -181,15 +181,13 @@ static int ioctl_pci_info(pcidriver_privdata_t *privdata, unsigned long arg)
*/
static int ioctl_kmem_alloc(pcidriver_privdata_t *privdata, unsigned long arg)
{
- int ret;
- READ_FROM_USER(kmem_handle_t, khandle);
-
- if ((ret = pcidriver_kmem_alloc(privdata, &khandle)) != 0)
- return ret;
+ int err, ret;
+ READ_FROM_USER(kmem_handle_t, khandle);
+ err = pcidriver_kmem_alloc(privdata, &khandle);
WRITE_TO_USER(kmem_handle_t, khandle);
- return 0;
+ return err;
}
/**
diff --git a/driver/kmem.c b/driver/kmem.c
index 7539ae6..618b5bf 100644
--- a/driver/kmem.c
+++ b/driver/kmem.c
@@ -53,23 +53,27 @@ int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
} else {
if (kmem_handle->type != kmem_entry->type) {
mod_info("Invalid type of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->type, kmem_handle->type);
+ kmem_handle->type = kmem_entry->type;
return -EINVAL;
}
if (((kmem_handle->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE)&&(kmem_handle->size == 0)) {
- kmem_handle->size = kmem_entry->size;
+ kmem_handle->size = kmem_entry->size;
} else if (kmem_handle->size != kmem_entry->size) {
mod_info("Invalid size of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->size, kmem_handle->size);
+ kmem_handle->size = kmem_entry->size;
return -EINVAL;
}
-
+
if (kmem_handle->align != kmem_entry->align) {
mod_info("Invalid alignment of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->align, kmem_handle->align);
+ kmem_handle->align = kmem_entry->align;
return -EINVAL;
}
if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?1:0) != ((flags&KMEM_FLAG_EXCLUSIVE)?1:0)) {
mod_info("Invalid mode of reusable kmem_entry\n");
+ kmem_handle->flags = (kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?KMEM_FLAG_EXCLUSIVE:0;
return -EINVAL;
}
}
diff --git a/pcilib/kmem.c b/pcilib/kmem.c
index b1d2c5c..4e240c3 100644
--- a/pcilib/kmem.c
+++ b/pcilib/kmem.c
@@ -46,8 +46,8 @@ static void pcilib_cancel_kernel_memory(pcilib_t *ctx, pcilib_kmem_list_t *kbuf,
if (last_flags) {
pcilib_kmem_flags_t failed_flags = flags;
- if (last_flags&KMEM_FLAG_REUSED_PERSISTENT) flags&=~PCILIB_KMEM_FLAG_PERSISTENT;
- if (last_flags&KMEM_FLAG_REUSED_HW) flags&=~PCILIB_KMEM_FLAG_HARDWARE;
+ if (last_flags&KMEM_FLAG_REUSED_PERSISTENT) failed_flags&=~PCILIB_KMEM_FLAG_PERSISTENT;
+ if (last_flags&KMEM_FLAG_REUSED_HW) failed_flags&=~PCILIB_KMEM_FLAG_HARDWARE;
if (failed_flags != flags) {
ret = pcilib_free_kernel_buffer(ctx, kbuf, --kbuf->buf.n_blocks, failed_flags);
@@ -60,10 +60,10 @@ static void pcilib_cancel_kernel_memory(pcilib_t *ctx, pcilib_kmem_list_t *kbuf,
pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type_t type, size_t nmemb, size_t size, size_t alignment, pcilib_kmem_use_t use, pcilib_kmem_flags_t flags) {
int err = 0;
- const char *error = NULL;
+ char error[256];
int ret;
- int i;
+ size_t i, allocated = nmemb;
void *addr;
pcilib_tristate_t reused = PCILIB_TRISTATE_NO;
@@ -92,7 +92,7 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
pcilib_error("PCIDRIVER_IOC_MMAP_MODE ioctl have failed");
return NULL;
}
-
+
kh.type = type;
kh.size = size;
kh.align = alignment;
@@ -104,10 +104,13 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
kh.size += alignment;
}
- for ( i = 0; i < nmemb; i++) {
+ for ( i = 0; (i < nmemb)||(flags&PCILIB_KMEM_FLAG_MASS); i++) {
kh.item = i;
kh.flags = flags;
+ if (i >= nmemb)
+ kh.flags |= KMEM_FLAG_TRY;
+
if ((type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
kh.pa = alignment + i * size;
}
@@ -115,10 +118,43 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
ret = ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_ALLOC, &kh);
if (ret) {
kbuf->buf.n_blocks = i;
- error = "PCIDRIVER_IOC_KMEM_ALLOC ioctl have failed";
+ if ((i < nmemb)||(errno != ENOENT)) {
+ err = PCILIB_ERROR_FAILED;
+ if (errno == EINVAL) {
+ if (kh.type != type)
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), we have requested type %u but buffer is of type %lu", use, i, type, kh.type);
+ else if (kh.size != size)
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), we have requested size %lu but buffer is of size %lu", use, i, size, kh.size);
+ else if (kh.align != alignment)
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), we have requested alignment %lu but buffer is of alignment %lu", use, i, size, kh.size);
+ else if ((kh.flags&KMEM_FLAG_EXCLUSIVE) != (flags&KMEM_FLAG_EXCLUSIVE))
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), we have requested size %s but buffer is of size %s", use, i, ((flags&KMEM_FLAG_EXCLUSIVE)?"exclusive":"non-exclusive"), ((kh.flags&KMEM_FLAG_EXCLUSIVE)?"exclusive":"non exclusive"));
+ else
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), unknown consistency error", use, i);
+ } else if (errno == EBUSY) {
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), reuse counter of kmem_entry is overflown", use, i);
+ } else if (errno == ENOMEM) {
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), memory allocation (%zu bytes) failed", use, i, size);
+ } else {
+ sprintf(error, "Driver prevents us from re-using buffer (use 0x%x, block: %zu), PCIDRIVER_IOC_KMEM_ALLOC ioctl have failed with errno %i", use, i, errno);
+ }
+ }
break;
}
+ if (i >= allocated) {
+ void *kbuf_new = realloc(kbuf, sizeof(pcilib_kmem_list_t) + 2 * allocated * sizeof(pcilib_kmem_addr_t));
+ if (!kbuf_new) {
+ kbuf->buf.n_blocks = i;
+ err = PCILIB_ERROR_MEMORY;
+ sprintf(error, "Failed to allocate extra %zu bytes of user-space memory for kmem structures", allocated * sizeof(pcilib_kmem_addr_t));
+ break;
+ }
+ memset(kbuf_new + sizeof(pcilib_kmem_list_t) + allocated * sizeof(pcilib_kmem_addr_t) , 0, allocated * sizeof(pcilib_kmem_addr_t));
+ kbuf = kbuf_new;
+ allocated *= 2;
+ }
+
kbuf->buf.blocks[i].handle_id = kh.handle_id;
kbuf->buf.blocks[i].pa = kh.pa;
kbuf->buf.blocks[i].size = kh.size;
@@ -135,27 +171,44 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
else*/ persistent = (kh.flags&KMEM_FLAG_REUSED_PERSISTENT)?1:0;
} else if ((kh.flags&KMEM_FLAG_REUSED_PERSISTENT) == 0) err = PCILIB_ERROR_INVALID_STATE;
} else if (kh.flags&KMEM_FLAG_REUSED_PERSISTENT) err = PCILIB_ERROR_INVALID_STATE;
-
+ if (err) {
+ kbuf->buf.n_blocks = i + 1;
+ sprintf(error, "Mistmatch in persistent modes of the re-used kmem blocks. Current buffer (use 0x%x, block: %zu) is %s, but prior ones %s",
+ use, i, ((kh.flags&KMEM_FLAG_REUSED_PERSISTENT)?"persistent":"not persistent"), (persistent?"are":"are not"));
+ break;
+ }
+
if (hardware) {
if (hardware < 0) {
/*if (((flags&PCILIB_KMEM_FLAG_HARDWARE) == 0)&&(kh.flags&KMEM_FLAG_REUSED_HW)) err = PCILIB_ERROR_INVALID_STATE;
else*/ hardware = (kh.flags&KMEM_FLAG_REUSED_HW)?1:0;
} else if ((kh.flags&KMEM_FLAG_REUSED_HW) == 0) err = PCILIB_ERROR_INVALID_STATE;
} else if (kh.flags&KMEM_FLAG_REUSED_HW) err = PCILIB_ERROR_INVALID_STATE;
-
+ if (err) {
+ kbuf->buf.n_blocks = i + 1;
+ sprintf(error, "Mistmatch in hardware modes of the re-used kmem blocks. Current buffer (use 0x%x, block: %zu) is %s, but prior ones %s",
+ use, i, ((kh.flags&KMEM_FLAG_REUSED_HW)?"hardware-locked":"not hardware-locked"), (hardware?"are":"are not"));
+ break;
+ }
} else {
if (!i) reused = PCILIB_TRISTATE_NO;
else if (reused) reused = PCILIB_TRISTATE_PARTIAL;
-
- if ((persistent > 0)&&((flags&PCILIB_KMEM_FLAG_PERSISTENT) == 0)) err = PCILIB_ERROR_INVALID_STATE;
- if ((hardware > 0)&&((flags&PCILIB_KMEM_FLAG_HARDWARE) == 0)) err = PCILIB_ERROR_INVALID_STATE;
+
+ if ((persistent > 0)&&((flags&PCILIB_KMEM_FLAG_PERSISTENT) == 0)) {
+ err = PCILIB_ERROR_INVALID_STATE;
+ sprintf(error, "Expecting to re-use persistent blocks, but buffer (use 0x%x, block: %zu) is not", use, i);
+ }
+ else if ((hardware > 0)&&((flags&PCILIB_KMEM_FLAG_HARDWARE) == 0)) {
+ err = PCILIB_ERROR_INVALID_STATE;
+ sprintf(error, "Expecting to re-use hardware-locked blocks, but buffer (use 0x%x, block: %zu) is not", use, i);
+ }
+ if (err) {
+ kbuf->buf.n_blocks = i + 1;
+ break;
+ }
}
- if (err) {
- kbuf->buf.n_blocks = i + 1;
- break;
- }
-
+
if ((kh.align)&&((kh.type&PCILIB_KMEM_TYPE_MASK) != PCILIB_KMEM_TYPE_PAGE)) {
if (kh.pa % kh.align) kbuf->buf.blocks[i].alignment_offset = kh.align - kh.pa % kh.align;
kbuf->buf.blocks[i].size -= kh.align;
@@ -164,18 +217,34 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
addr = mmap( 0, kbuf->buf.blocks[i].size + kbuf->buf.blocks[i].alignment_offset, PROT_WRITE | PROT_READ, MAP_SHARED, ctx->handle, 0 );
if ((!addr)||(addr == MAP_FAILED)) {
kbuf->buf.n_blocks = i + 1;
- error = "Failed to mmap allocated kernel memory";
+ err = PCILIB_ERROR_FAILED;
+ sprintf(error, "Driver prevents us from mmaping buffer (use 0x%x, block: %zu), mmap have failed with errno %i", use, i, errno);
break;
}
kbuf->buf.blocks[i].ua = addr;
-// if (use == PCILIB_KMEM_USE_DMA_PAGES) {
-// memset(addr, 10, kbuf->buf.blocks[i].size + kbuf->buf.blocks[i].alignment_offset);
-// }
-
kbuf->buf.blocks[i].mmap_offset = kh.pa & ctx->page_mask;
}
+ if (err) kbuf->buf.n_blocks = i + 1;
+ else kbuf->buf.n_blocks = i;
+
+
+ // Check if there are more unpicked buffers
+ if ((!err)&&((flags&PCILIB_KMEM_FLAG_MASS) == 0)&&(reused == PCILIB_TRISTATE_YES)&&((type&PCILIB_KMEM_TYPE_MASK) != PCILIB_KMEM_TYPE_REGION)) {
+ kh.item = kbuf->buf.n_blocks;
+ kh.flags = KMEM_FLAG_REUSE|KMEM_FLAG_TRY;
+
+ ret = ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_ALLOC, &kh);
+ if (!ret) {
+ kh.flags = KMEM_FLAG_REUSE;
+ ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_FREE, &kh);
+ reused = PCILIB_TRISTATE_PARTIAL;
+ } else if (errno != ENOENT) {
+ reused = PCILIB_TRISTATE_PARTIAL;
+ }
+ }
+
pcilib_unlock_global(ctx);
@@ -183,37 +252,32 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
if (persistent < 0) persistent = 0;
if (hardware < 0) hardware = 0;
- if (err||error) {
- pcilib_kmem_flags_t free_flags = 0;
-
- // for the sake of simplicity always clean partialy reused buffers
- if ((persistent == PCILIB_TRISTATE_PARTIAL)||((persistent <= 0)&&(flags&PCILIB_KMEM_FLAG_PERSISTENT))) {
- free_flags |= PCILIB_KMEM_FLAG_PERSISTENT;
- }
-
- if ((hardware <= 0)&&(flags&PCILIB_KMEM_FLAG_HARDWARE)) {
- free_flags |= PCILIB_KMEM_FLAG_HARDWARE;
- }
-
- // do not clean if we have reused peresistent buffers
- // we don't care about -1, because it will be the value only if no buffers actually allocated
- if ((!persistent)||(reused != PCILIB_TRISTATE_YES)) {
- pcilib_cancel_kernel_memory(ctx, kbuf, free_flags, err?kh.flags:0);
+ if (err) {
+ // do not clean if we have reused (even partially) persistent/hardware-locked buffers
+ if (((persistent)||(hardware))&&(reused != PCILIB_TRISTATE_NO)) {
+ pcilib_cancel_kernel_memory(ctx, kbuf, KMEM_FLAG_REUSE, 0);
+ } else {
+ pcilib_kmem_flags_t free_flags = 0;
+ if (flags&PCILIB_KMEM_FLAG_PERSISTENT) {
+ free_flags |= PCILIB_KMEM_FLAG_PERSISTENT;
+ }
+ if (flags&PCILIB_KMEM_FLAG_HARDWARE) {
+ free_flags |= PCILIB_KMEM_FLAG_HARDWARE;
+ }
+ // err indicates a consistency error. The last ioctl has succeeded and we need to clean it up in a special way
+ pcilib_cancel_kernel_memory(ctx, kbuf, free_flags, (err == PCILIB_ERROR_INVALID_STATE)?kh.flags:0);
}
- if (!error) error = "Reused buffers are inconsistent";
- pcilib_error(error);
-
+ pcilib_warning("Error %i: %s", err, error);
return NULL;
}
if (nmemb == 1) {
memcpy(&kbuf->buf.addr, &kbuf->buf.blocks[0], sizeof(pcilib_kmem_addr_t));
}
-
+
kbuf->buf.reused = reused|(persistent?PCILIB_KMEM_REUSE_PERSISTENT:0)|(hardware?PCILIB_KMEM_REUSE_HARDWARE:0);
- kbuf->buf.n_blocks = nmemb;
-
+
kbuf->prev = NULL;
kbuf->next = ctx->kmem_list;
if (ctx->kmem_list) ctx->kmem_list->prev = kbuf;
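With PCILIB_KMEM_FLAG_MASS the allocation loop above keeps issuing KMEM_FLAG_TRY requests past the nmemb that was asked for, so every block left over from a previous run is picked up, and the user-space block array is grown geometrically on the way. A self-contained illustration of that grow-and-zero step (plain C, not the actual pcilib structures):

#include <stdlib.h>
#include <string.h>

/* Double the capacity of an array of fixed-size records and zero the new tail,
 * as pcilib_alloc_kernel_memory() does for its block list when more re-usable
 * buffers are discovered than were requested. */
static void *grow_blocks(void *blocks, size_t *allocated, size_t record_size) {
    void *tmp = realloc(blocks, 2 * (*allocated) * record_size);
    if (!tmp)
        return NULL;                    /* caller keeps the old array */
    memset((char *)tmp + (*allocated) * record_size, 0, (*allocated) * record_size);
    *allocated *= 2;
    return tmp;
}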
diff --git a/pcilib/pcilib.h b/pcilib/pcilib.h
index fbab883..cfe3e96 100644
--- a/pcilib/pcilib.h
+++ b/pcilib/pcilib.h
@@ -86,7 +86,8 @@ typedef enum {
PCILIB_DMA_FLAG_WAIT = 2, /**< wait completion of write operation / wait for data during read operation */
PCILIB_DMA_FLAG_MULTIPACKET = 4, /**< read multiple packets */
PCILIB_DMA_FLAG_PERSISTENT = 8, /**< do not stop DMA engine on application termination / permanently close DMA engine on dma_stop */
- PCILIB_DMA_FLAG_IGNORE_ERRORS = 16 /**< do not crash on errors, but return appropriate error codes */
+ PCILIB_DMA_FLAG_IGNORE_ERRORS = 16, /**< do not crash on errors, but return appropriate error codes */
+ PCILIB_DMA_FLAG_STOP = 32 /**< indicates that we are actually calling pcilib_dma_start to stop a persistent DMA engine */
} pcilib_dma_flags_t;
typedef enum {
diff --git a/pcitool/cli.c b/pcitool/cli.c
index b54ba9d..0a070d7 100644
--- a/pcitool/cli.c
+++ b/pcitool/cli.c
@@ -2333,7 +2333,7 @@ int StartStopDMA(pcilib_t *handle, const pcilib_model_description_t *model_info
if (start) Error("DMA engine should be specified");
for (dmaid = 0; dma_info->engines[dmaid].addr_bits; dmaid++) {
- err = pcilib_start_dma(handle, dmaid, 0);
+ err = pcilib_start_dma(handle, dmaid, PCILIB_DMA_FLAG_STOP);
if (err) Error("Error starting DMA Engine (%s %i)", ((dma_info->engines[dmaid].direction == PCILIB_DMA_FROM_DEVICE)?"C2S":"S2C"), dma_info->engines[dmaid].addr);
err = pcilib_stop_dma(handle, dmaid, PCILIB_DMA_FLAG_PERSISTENT);
if (err) Error("Error stopping DMA Engine (%s %i)", ((dma_info->engines[dmaid].direction == PCILIB_DMA_FROM_DEVICE)?"C2S":"S2C"), dma_info->engines[dmaid].addr);
@@ -2350,7 +2350,7 @@ int StartStopDMA(pcilib_t *handle, const pcilib_model_description_t *model_info
err = pcilib_start_dma(handle, dmaid, PCILIB_DMA_FLAG_PERSISTENT);
if (err) Error("Error starting DMA engine (C2S %lu)", dma);
} else {
- err = pcilib_start_dma(handle, dmaid, 0);
+ err = pcilib_start_dma(handle, dmaid, PCILIB_DMA_FLAG_STOP);
if (err) Error("Error starting DMA engine (C2S %lu)", dma);
err = pcilib_stop_dma(handle, dmaid, PCILIB_DMA_FLAG_PERSISTENT);
if (err) Error("Error stopping DMA engine (C2S %lu)", dma);
@@ -2365,7 +2365,7 @@ int StartStopDMA(pcilib_t *handle, const pcilib_model_description_t *model_info
err = pcilib_start_dma(handle, dmaid, PCILIB_DMA_FLAG_PERSISTENT);
if (err) Error("Error starting DMA engine (S2C %lu)", dma);
} else {
- err = pcilib_start_dma(handle, dmaid, 0);
+ err = pcilib_start_dma(handle, dmaid, PCILIB_DMA_FLAG_STOP);
if (err) Error("Error starting DMA engine (S2C %lu)", dma);
err = pcilib_stop_dma(handle, dmaid, PCILIB_DMA_FLAG_PERSISTENT);
if (err) Error("Error stopping DMA engine (S2C %lu)", dma);