-rw-r--r--  NOTES                37
-rw-r--r--  dma/nwl.c             2
-rw-r--r--  dma/nwl_buffers.h     7
-rw-r--r--  driver/base.c         2
-rw-r--r--  driver/common.h      16
-rw-r--r--  driver/kmem.c       139
-rw-r--r--  driver/pciDriver.h   14
-rw-r--r--  driver/sysfs.c        2
-rw-r--r--  error.h               1
-rw-r--r--  kmem.c               94
-rw-r--r--  kmem.h               21
-rw-r--r--  pcilib.h              2
-rw-r--r--  tools.h               7
13 files changed, 268 insertions, 76 deletions
diff --git a/NOTES b/NOTES
index 9afd05a..6eea356 100644
--- a/NOTES
+++ b/NOTES
@@ -32,15 +32,16 @@ DMA Access Synchronization
necessary. The usage counter is increased by the kmem_alloc function and
decreased by kmem_free. Finally, the reference is obtained and released
during mmap/munmap. So, on kmem_free, we do not clean up:
- a) reusable buffers with reference count above zero or hardware
- reference set
- b) non-exclusive buffers with usage counter above zero (For exclusive
+ a) buffers with a reference count above zero or a hardware reference set.
+ The REUSE flag should be supplied, otherwise an error is returned
+ b) PERSISTENT buffers. The REUSE flag should be supplied, otherwise an
+ error is returned
+ c) non-exclusive buffers with a usage counter above zero (for exclusive
buffers a usage counter above zero just means that the application
has failed without cleaning up its buffers first. There is no easy way to
detect that for shared buffers, so it is left as a manual operation in
this case)
- c) any buffer if KMEM_FLAG_REUSE was provided to function (I don't have
- a clear idea why to call it at all, but I have feeling it can be useful
+ d) any buffer if KMEM_FLAG_REUSE was provided to the function
During module unload, only buffers with references can prevent cleanup. In
this case the only possibility to free the driver is to call kmem_free
passing the FORCE flag.
@@ -63,21 +64,29 @@ DMA Access Synchronization
if the buffer is left in an incoherent state. This should be handled on the upper level.
- At pcilib/kmem level synchronization of multiple buffers is performed
- Inconsistent buffer types:
+ * The HW reference and the following modes should be consistent between the
+ member parts: REUSABLE, PERSISTENT, EXCLUSIVE (only the HW reference and the
+ PERSISTENT mode should be checked here, the others are handled on the driver
+ level)
+ * It is fine if only part of the buffers are reused and the others are newly
+ allocated. However, on a higher level this can be checked and result in a
+ failure.
+
+ Treatment of inconsistencies:
* Buffers are in PERSISTENT mode, but newly allocated, OK
* Buffers are reused, but are not in PERSISTENT mode (for EXCLUSIVE buffers
this means that the application has crashed during the last execution), OK
* Some of the buffers are reused (not just REUSABLE, but actually reused),
- others - not, FAIL
- * Some of buffers are REUSABLE, others - not, FAIL
- * Some of buffers are EXCLUSIVE, others - not, FAIL
- * Some of buffers are PERSISTENT, others - not, FAIL
- * Some of buffers are HW, others - not, FAIL (to simplify clean-up,
- even if we are going to set HW flag anyway)
+ others - not, OK as long as
+ a) either the PERSISTENT flag is set or the reused buffers are non-PERSISTENT
+ b) either the HW flag is set or the reused buffers do not hold a HW reference
+ * PERSISTENT mode inconsistency, FAIL (even if we are going to set
+ PERSISTENT mode anyway)
+ * HW reference inconsistency, FAIL (even if we are going to set
+ HW flag anyway)
On an allocation error at some of the buffers, call the clean-up routine and
- * Preserve HW flag if buffers hold HW reference
- * Preserve PERSISTENT flag if buffers are in PERSISTENT mode
+ * Preserve HW reference if buffers held HW reference before
+ * Preserve PERSISTENT mode if buffers were in PERSISTENT mode before
* Remove the REUSE flag; we want to clean up if the current buffer status
allows it
* The EXCLUSIVE flag is not important for the kmem_free routine (see the
usage sketch after this hunk).
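
The following is a hedged usage sketch, not part of this commit, of how a
caller could follow the rules above through the pcilib kmem API: allocate a
reusable, persistent, hardware-referenced buffer and later drop only the
software reference. The use code, size and alignment values are placeholders,
and the include set assumes the usual pcilib headers.

/* Sketch only: a DMA ring that is supposed to survive application restarts */
#include "pcilib.h"
#include "kmem.h"
#include "error.h"

static int setup_persistent_ring(pcilib_t *ctx) {
    pcilib_kmem_flags_t flags = PCILIB_KMEM_FLAG_REUSE
                              | PCILIB_KMEM_FLAG_PERSISTENT
                              | PCILIB_KMEM_FLAG_HARDWARE;

    /* One consistent block; size (4096) and alignment (64) are placeholders */
    pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx,
        PCILIB_KMEM_TYPE_CONSISTENT, 1, 4096, 64,
        PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0), flags);
    if (!ring) return PCILIB_ERROR_FAILED;

    if (pcilib_kmem_is_reused(ctx, ring) & PCILIB_KMEM_REUSE_HARDWARE) {
        /* A previous run left a HW reference: the engine may still use the ring */
    }

    /* Drop only the software reference; REUSE keeps the buffer, its
       PERSISTENT mode and the HW reference alive in the kernel */
    pcilib_free_kernel_memory(ctx, ring, PCILIB_KMEM_FLAG_REUSE);
    return 0;
}
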
diff --git a/dma/nwl.c b/dma/nwl.c
index 6bb79c5..78a587e 100644
--- a/dma/nwl.c
+++ b/dma/nwl.c
@@ -26,7 +26,7 @@ int dma_nwl_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
if (dma == PCILIB_DMA_ENGINE_INVALID) return 0;
else if (dma > ctx->n_engines) return PCILIB_ERROR_INVALID_BANK;
- if (flags&PCILIB_DMA_FLAG_PERMANENT) ctx->engines[dma].preserve = 1;
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) ctx->engines[dma].preserve = 1;
return dma_nwl_start_engine(ctx, dma);
}
diff --git a/dma/nwl_buffers.h b/dma/nwl_buffers.h
index e0df2d0..a38af8c 100644
--- a/dma/nwl_buffers.h
+++ b/dma/nwl_buffers.h
@@ -22,6 +22,7 @@ int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_descriptio
uint32_t buf_sz;
uint64_t buf_pa;
pcilib_kmem_reuse_t reuse_ring, reuse_pages;
+ pcilib_kmem_flags_t flags;
char *base = info->base_addr;
@@ -29,8 +30,9 @@ int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_descriptio
// Or bidirectional specified by 0x0|addr, or read 0x0|addr and write 0x80|addr
sub_use = info->desc.addr|(info->desc.direction == PCILIB_DMA_TO_DEVICE)?0x80:0x00;
- pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), PCILIB_KMEM_FLAG_REUSE);
- pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_PAGE, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), PCILIB_KMEM_FLAG_REUSE);
+ flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(info->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
+ pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), flags);
+ pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_PAGE, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), flags);
if ((ring)&&(pages)) err = dma_nwl_sync_buffers(ctx, info, pages);
else err = PCILIB_ERROR_FAILED;
@@ -41,6 +43,7 @@ int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_descriptio
return err;
}
+
/*
reuse_ring = pcilib_kmem_is_reused(ctx->pcilib, ring);
reuse_pages = pcilib_kmem_is_reused(ctx->pcilib, pages);
diff --git a/driver/base.c b/driver/base.c
index 37102d8..d88dcf2 100644
--- a/driver/base.c
+++ b/driver/base.c
@@ -353,8 +353,6 @@ static int __devinit pcidriver_probe(struct pci_dev *pdev, const struct pci_devi
spin_lock_init(&(privdata->kmemlist_lock));
atomic_set(&privdata->kmem_count, 0);
- privdata->kmem_cur = NULL;
-
INIT_LIST_HEAD(&(privdata->umem_list));
spin_lock_init(&(privdata->umemlist_lock));
atomic_set(&privdata->umem_count, 0);
diff --git a/driver/common.h b/driver/common.h
index e45ae25..b79d5ec 100644
--- a/driver/common.h
+++ b/driver/common.h
@@ -6,6 +6,14 @@
/*************************************************************************/
/* Private data types and structures */
+#define KMEM_REF_HW 0x80000000 /**< Special reference to indicate hardware access */
+#define KMEM_REF_COUNT 0x0FFFFFFF /**< Mask of reference counter (mmap/munmap) */
+
+#define KMEM_MODE_REUSABLE 0x80000000 /**< Indicates reusable buffer */
+#define KMEM_MODE_EXCLUSIVE 0x40000000 /**< Only a single process is allowed to mmap the buffer */
+#define KMEM_MODE_PERSISTENT 0x20000000 /**< Persistent mode instructs kmem_free to preserve buffer in memory */
+#define KMEM_MODE_COUNT 0x0FFFFFFF /**< Mask of reuse counter (alloc/free) */
+
/* Define an entry in the kmem list (this list is per device) */
/* This list keeps references to the allocated kernel buffers */
typedef struct {
@@ -15,8 +23,14 @@ typedef struct {
unsigned long cpua;
unsigned long size;
unsigned long type;
+
unsigned long use;
unsigned long item;
+
+ spinlock_t lock;
+ unsigned long mode;
+ unsigned long refs;
+
struct class_device_attribute sysfs_attr; /* initialized when adding the entry */
} pcidriver_kmem_entry_t;
@@ -57,7 +71,7 @@ typedef struct {
struct list_head kmem_list; /* List of 'kmem_list_entry's associated with this device */
atomic_t kmem_count; /* id for next kmem entry */
- pcidriver_kmem_entry_t *kmem_cur; /* Currently selected kmem buffer, for mmap */
+ int kmem_cur_id; /* Currently selected kmem buffer, for mmap */
spinlock_t umemlist_lock; /* Spinlock to lock umem list operations */
struct list_head umem_list; /* List of 'umem_list_entry's associated with this device */
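
As a standalone illustration, not driver code, the snippet below decodes the
packed mode and refs words defined above; the mask values simply mirror the
definitions added to driver/common.h by this commit.

#include <stdio.h>

#define KMEM_REF_HW          0x80000000UL  /* hardware reference bit */
#define KMEM_REF_COUNT       0x0FFFFFFFUL  /* mmap/munmap reference counter */
#define KMEM_MODE_REUSABLE   0x80000000UL
#define KMEM_MODE_EXCLUSIVE  0x40000000UL
#define KMEM_MODE_PERSISTENT 0x20000000UL
#define KMEM_MODE_COUNT      0x0FFFFFFFUL  /* alloc/free reuse counter */

static void describe_kmem_entry(unsigned long mode, unsigned long refs) {
    printf("reuse count: %lu, mmap refs: %lu\n",
           mode & KMEM_MODE_COUNT, refs & KMEM_REF_COUNT);
    printf("reusable: %d, exclusive: %d, persistent: %d, hw ref: %d\n",
           (mode & KMEM_MODE_REUSABLE) ? 1 : 0,
           (mode & KMEM_MODE_EXCLUSIVE) ? 1 : 0,
           (mode & KMEM_MODE_PERSISTENT) ? 1 : 0,
           (refs & KMEM_REF_HW) ? 1 : 0);
}

int main(void) {
    /* A reusable, persistent buffer allocated twice, mmapped once,
       with a hardware reference held */
    describe_kmem_entry(KMEM_MODE_REUSABLE | KMEM_MODE_PERSISTENT | 2,
                        KMEM_REF_HW | 1);
    return 0;
}
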
diff --git a/driver/kmem.c b/driver/kmem.c
index ed2a2af..a720095 100644
--- a/driver/kmem.c
+++ b/driver/kmem.c
@@ -35,24 +35,48 @@ int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
pcidriver_kmem_entry_t *kmem_entry;
void *retptr;
- privdata->kmem_cur = NULL;
-
- if (kmem_handle->reuse) {
+ if (kmem_handle->flags&KMEM_FLAG_REUSE) {
/* kmem_entry = pcidriver_kmem_find_entry_use(privdata, kmem_handle->use, kmem_handle->item);
if (kmem_entry) {
- if (kmem_handle->type != kmem_entry->type) return EINVAL;
+ unsigned long flags = kmem_handle->flags;
+
+ if (kmem_handle->type != kmem_entry->type) {
+ mod_info("Invalid type of reusable kmem_entry\n");
+ return -EINVAL;
+ }
- if (kmem_handle->type == PCILIB_KMEM_TYPE_PAGE) kmem_handle->size = kmem_entry->size;
- else if (kmem_handle->size != kmem_entry->size) return EINVAL;
+ if (kmem_handle->type == PCILIB_KMEM_TYPE_PAGE) {
+ kmem_handle->size = kmem_entry->size;
+ } else if (kmem_handle->size != kmem_entry->size) {
+ mod_info("Invalid size of reusable kmem_entry\n");
+ return -EINVAL;
+ }
+
+ if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?1:0) != ((flags&KMEM_FLAG_EXCLUSIVE)?1:0)) {
+ mod_info("Invalid mode of reusable kmem_entry\n");
+ return -EINVAL;
+ }
+ if ((kmem_entry->mode&KMEM_MODE_COUNT)==KMEM_MODE_COUNT) {
+ mod_info("Reuse counter of kmem_entry is overflown");
+ return -EBUSY;
+ }
+
kmem_handle->handle_id = kmem_entry->id;
kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
-
- privdata->kmem_cur = kmem_entry;
+
+ kmem_handle->flags = KMEM_FLAG_REUSED;
+ if (kmem_entry->refs&KMEM_REF_HW) kmem_handle->flags |= KMEM_FLAG_REUSED_HW;
+ if (kmem_entry->mode&KMEM_MODE_PERSISTENT) kmem_handle->flags |= KMEM_FLAG_REUSED_PERSISTENT;
+
+ kmem_entry->mode += 1;
+ if (flags&KMEM_FLAG_HW) kmem_entry->refs |= KMEM_REF_HW;
+ if (flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
+
+ privdata->kmem_cur_id = kmem_entry->id;
return 0;
}*/
- kmem_handle->reuse = 0;
}
/* First, allocate zeroed memory for the kmem_entry */
@@ -61,10 +85,12 @@ int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
/* Initialize the kmem_entry */
kmem_entry->id = atomic_inc_return(&privdata->kmem_count) - 1;
+ privdata->kmem_cur_id = kmem_entry->id;
+ kmem_handle->handle_id = kmem_entry->id;
+
kmem_entry->use = kmem_handle->use;
kmem_entry->item = kmem_handle->item;
kmem_entry->type = kmem_handle->type;
- kmem_handle->handle_id = kmem_entry->id;
/* Initialize sysfs if possible */
if (pcidriver_sysfs_initialize_kmem(privdata, kmem_entry->id, &(kmem_entry->sysfs_attr)) != 0)
@@ -94,11 +120,23 @@ int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
}
if (retptr == NULL)
goto kmem_alloc_mem_fail;
-
+
kmem_entry->size = kmem_handle->size;
kmem_entry->cpua = (unsigned long)retptr;
kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
+ kmem_entry->mode = 1;
+ if (kmem_handle->flags&KMEM_FLAG_REUSE) {
+ kmem_entry->mode |= KMEM_MODE_REUSABLE;
+ if (kmem_handle->flags&KMEM_FLAG_EXCLUSIVE) kmem_entry->mode |= KMEM_MODE_EXCLUSIVE;
+ if (kmem_handle->flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
+ }
+
+ kmem_entry->refs = 0;
+ if (kmem_handle->flags&KMEM_FLAG_HW) kmem_entry->refs |= KMEM_REF_HW;
+
+ kmem_handle->flags = 0;
+
set_pages_reserved_compat(kmem_entry->cpua, kmem_entry->size);
/* Add the kmem_entry to the list of the device */
@@ -123,15 +161,37 @@ int pcidriver_kmem_free( pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
{
pcidriver_kmem_entry_t *kmem_entry;
- if (kmem_handle->reuse) {
- // just mark free
- return 0;
- }
-
/* Find the associated kmem_entry for this buffer */
if ((kmem_entry = pcidriver_kmem_find_entry(privdata, kmem_handle)) == NULL)
return -EINVAL; /* kmem_handle is not valid */
+
+ if (kmem_entry->mode&KMEM_MODE_COUNT)
+ kmem_entry->mode -= 1;
+
+ if (kmem_handle->flags&KMEM_FLAG_HW)
+ kmem_entry->refs &= ~KMEM_REF_HW;
+
+ if (kmem_handle->flags&KMEM_FLAG_PERSISTENT)
+ kmem_entry->mode &= ~KMEM_MODE_PERSISTENT;
+
+ if (kmem_handle->flags&KMEM_FLAG_REUSE)
+ return 0;
+
+ if (kmem_entry->refs) {
+ mod_info("can't free referenced kmem_entry\n");
+ kmem_entry->mode += 1;
+ return -EBUSY;
+ }
+
+ if (kmem_entry->mode & KMEM_MODE_PERSISTENT) {
+ mod_info("can't free persistent kmem_entry\n");
+ return -EBUSY;
+ }
+
+ if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)==0)&&(kmem_entry->mode&KMEM_MODE_COUNT))
+ return 0;
+
return pcidriver_kmem_free_entry(privdata, kmem_entry);
}
@@ -142,14 +202,23 @@ int pcidriver_kmem_free( pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
*/
int pcidriver_kmem_free_all(pcidriver_privdata_t *privdata)
{
+ int failed = 0;
struct list_head *ptr, *next;
pcidriver_kmem_entry_t *kmem_entry;
/* iterate safely over the entries and delete them */
list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
- pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
+/* if (kmem_entry->refs)
+ failed = 1;
+ else*/
+ pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
}
+
+ if (failed) {
+ mod_info("Some kmem_entries are still referenced\n");
+ return -EBUSY;
+ }
return 0;
}
@@ -219,8 +288,6 @@ int pcidriver_kmem_sync( pcidriver_privdata_t *privdata, kmem_sync_t *kmem_sync
*/
int pcidriver_kmem_free_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry)
{
- privdata->kmem_cur = NULL;
-
pcidriver_sysfs_remove(privdata, &(kmem_entry->sysfs_attr));
/* Go over the pages of the kmem buffer, and mark them as not reserved */
@@ -339,7 +406,7 @@ pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_use(pcidriver_privdata_t *priv
list_for_each(ptr, &(privdata->kmem_list)) {
entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
- if ((entry->use == use)&&(entry->item == item)) {
+ if ((entry->use == use)&&(entry->item == item)&&(entry->mode&KMEM_MODE_REUSABLE)) {
result = entry;
break;
}
@@ -350,6 +417,15 @@ pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_use(pcidriver_privdata_t *priv
}
+void pcidriver_kmem_mmap_close(struct vm_area_struct *vma) {
+ pcidriver_kmem_entry_t *kmem_entry = (pcidriver_kmem_entry_t*)vma->vm_private_data;
+ if (kmem_entry) kmem_entry->refs -= 1;
+}
+
+static struct vm_operations_struct pcidriver_kmem_mmap_ops = {
+ .close = pcidriver_kmem_mmap_close
+};
+
/**
*
* mmap() kernel memory to userspace.
@@ -365,15 +441,11 @@ int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *v
/* FIXME: Is this really right? Always just the latest one? Can't we identify one? */
/* Get latest entry on the kmem_list */
- spin_lock(&(privdata->kmemlist_lock));
- if (list_empty(&(privdata->kmem_list))) {
- spin_unlock(&(privdata->kmemlist_lock));
+ kmem_entry = pcidriver_kmem_find_entry_id(privdata, privdata->kmem_cur_id);
+ if (!kmem_entry) {
mod_info("Trying to mmap a kernel memory buffer without creating it first!\n");
return -EFAULT;
}
- if (privdata->kmem_cur) kmem_entry = privdata->kmem_cur;
- else kmem_entry = list_entry(privdata->kmem_list.prev, pcidriver_kmem_entry_t, list);
- spin_unlock(&(privdata->kmemlist_lock));
mod_info_dbg("Got kmem_entry with id: %d\n", kmem_entry->id);
@@ -386,6 +458,17 @@ int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *v
return -EINVAL;
}
+ /* reference counting */
+ if ((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)&&(kmem_entry->refs&KMEM_REF_COUNT)) {
+ mod_info("can't make second mmaping for exclusive kmem_entry\n");
+ return -EBUSY;
+ }
+ if ((kmem_entry->refs&KMEM_REF_COUNT)==KMEM_REF_COUNT) {
+ mod_info("maximal amount of references is reached by kmem_entry\n");
+ return -EBUSY;
+ }
+ kmem_entry->refs += 1;
+
vma->vm_flags |= (VM_RESERVED);
#ifdef pgprot_noncached
@@ -406,8 +489,12 @@ int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *v
if (ret) {
mod_info("kmem remap failed: %d (%lx)\n", ret,kmem_entry->cpua);
+ kmem_entry->refs -= 1;
return -EAGAIN;
}
+
+ vma->vm_ops = &pcidriver_kmem_mmap_ops;
+ vma->vm_private_data = (void*)kmem_entry;
return ret;
}
diff --git a/driver/pciDriver.h b/driver/pciDriver.h
index 3ff3357..9aa580f 100644
--- a/driver/pciDriver.h
+++ b/driver/pciDriver.h
@@ -99,6 +99,18 @@
/* Maximum number of interrupt sources */
#define PCIDRIVER_INT_MAXSOURCES 16
+
+#define KMEM_FLAG_REUSE 1 /**< Try to reuse existing buffer with the same use & item */
+#define KMEM_FLAG_EXCLUSIVE 2 /**< Allow only a single application accessing a specified use & item */
+#define KMEM_FLAG_PERSISTENT 4 /**< Sets persistent mode */
+#define KMEM_FLAG_HW 8 /**< The buffer may be accessed by hardware; passing this flag to the _free function indicates that hardware access will not occur any more */
+
+#define KMEM_FLAG_REUSED 1 /**< Indicates if buffer with specified use & item was already allocated and reused */
+#define KMEM_FLAG_REUSED_PERSISTENT 4 /**< Indicates that reused buffer was persistent before the call */
+#define KMEM_FLAG_REUSED_HW 8 /**< Indicates that reused buffer had a HW reference before the call */
+
+
+
/* Types */
typedef struct {
unsigned long type;
@@ -107,7 +119,7 @@ typedef struct {
unsigned long align;
unsigned long use;
unsigned long item;
- int reuse;
+ int flags;
int handle_id;
} kmem_handle_t;
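
A hedged sketch, not part of this commit, of driving the new flags field of
kmem_handle_t directly over the ioctl interface; the device node path and the
type/use/item values are placeholders.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "pciDriver.h"   /* KMEM_FLAG_*, kmem_handle_t, PCIDRIVER_IOC_* */

int main(void) {
    int fd = open("/dev/fpga0", O_RDWR);   /* device node is an assumption */
    if (fd < 0) return 1;

    kmem_handle_t kh;
    memset(&kh, 0, sizeof(kh));
    kh.type  = PCILIB_KMEM_TYPE_PAGE;      /* placeholder type constant */
    kh.use   = 1;                          /* placeholder use code */
    kh.item  = 0;
    kh.flags = KMEM_FLAG_REUSE | KMEM_FLAG_PERSISTENT | KMEM_FLAG_HW;

    if (ioctl(fd, PCIDRIVER_IOC_KMEM_ALLOC, &kh) == 0) {
        if (kh.flags & KMEM_FLAG_REUSED)
            printf("reused buffer %d (was persistent: %d, had hw ref: %d)\n",
                   kh.handle_id,
                   (kh.flags & KMEM_FLAG_REUSED_PERSISTENT) ? 1 : 0,
                   (kh.flags & KMEM_FLAG_REUSED_HW) ? 1 : 0);

        /* Release only the software reference; REUSE keeps the buffer allocated */
        kh.flags = KMEM_FLAG_REUSE;
        ioctl(fd, PCIDRIVER_IOC_KMEM_FREE, &kh);
    }

    close(fd);
    return 0;
}
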
diff --git a/driver/sysfs.c b/driver/sysfs.c
index 0653aef..ab0925c 100644
--- a/driver/sysfs.c
+++ b/driver/sysfs.c
@@ -98,7 +98,7 @@ static SYSFS_GET_FUNCTION(pcidriver_show_kmem_entry)
int id = simple_strtol(attr->attr.name + strlen("kbuf"), NULL, 10);
pcidriver_kmem_entry_t *entry = pcidriver_kmem_find_entry_id(privdata, id);
if (entry)
- return snprintf(buf, PAGE_SIZE, "buffer: %d\ntype: %lu\nuse: 0x%lx\nitem: %lu\nsize: %lu\n", id, entry->type, entry->use, entry->item, entry->size);
+ return snprintf(buf, PAGE_SIZE, "buffer: %d\ntype: %lu\nuse: 0x%lx\nitem: %lu\nsize: %lu\nrefs: %lu\nhw ref: %i\nmode: %lx\n", id, entry->type, entry->use, entry->item, entry->size, entry->refs&KMEM_REF_COUNT, (entry->refs&KMEM_REF_HW)?1:0, entry->mode);
else
return snprintf(buf, PAGE_SIZE, "I am in the kmem_entry show function for buffer %d\n", id);
#else
diff --git a/error.h b/error.h
index fcdbe68..568ef18 100644
--- a/error.h
+++ b/error.h
@@ -8,6 +8,7 @@ enum {
PCILIB_ERROR_INVALID_ADDRESS,
PCILIB_ERROR_INVALID_BANK,
PCILIB_ERROR_INVALID_DATA,
+ PCILIB_ERROR_INVALID_STATE,
PCILIB_ERROR_TIMEOUT,
PCILIB_ERROR_FAILED,
PCILIB_ERROR_VERIFY,
diff --git a/kmem.c b/kmem.c
index df01591..bdd17f0 100644
--- a/kmem.c
+++ b/kmem.c
@@ -18,11 +18,16 @@
#include "error.h"
pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type_t type, size_t nmemb, size_t size, size_t alignment, pcilib_kmem_use_t use, pcilib_kmem_flags_t flags) {
+ int err = 0;
+ const char *error = NULL;
+
int ret;
int i;
void *addr;
- pcilib_kmem_reuse_t reuse = PCILIB_KMEM_REUSE_ALLOCATED;
+ pcilib_tristate_t reused = PCILIB_TRISTATE_NO;
+ int persistent = -1;
+ int hardware = -1;
kmem_handle_t kh = {0};
@@ -52,28 +57,44 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
for ( i = 0; i < nmemb; i++) {
kh.item = i;
- kh.reuse = PCILIB_KMEM_FLAG_REUSE?1:0;
-
+ kh.flags = flags;
+
ret = ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_ALLOC, &kh);
if (ret) {
kbuf->buf.n_blocks = i;
- pcilib_free_kernel_memory(ctx, kbuf, 0);
- pcilib_error("PCIDRIVER_IOC_KMEM_ALLOC ioctl have failed");
- return NULL;
+ error = "PCIDRIVER_IOC_KMEM_ALLOC ioctl have failed";
+ break;
}
kbuf->buf.blocks[i].handle_id = kh.handle_id;
kbuf->buf.blocks[i].pa = kh.pa;
kbuf->buf.blocks[i].size = kh.size;
- kbuf->buf.blocks[i].reused = kh.reuse;
- if (reuse) {
- // if already reused, set to partial
- if (!kh.reuse) reuse = PCILIB_KMEM_REUSE_PARTIAL;
- } else if (kh.reuse) {
- if (i) reuse = PCILIB_KMEM_REUSE_PARTIAL;
- else reuse = PCILIB_KMEM_REUSE_REUSED;
+ if (!i) reused = (kh.flags&KMEM_FLAG_REUSED)?PCILIB_TRISTATE_YES:PCILIB_TRISTATE_NO;
+
+ if (kh.flags&KMEM_FLAG_REUSED) {
+ if (!i) reused = PCILIB_TRISTATE_YES;
+ else if (!reused) reused = PCILIB_TRISTATE_PARTIAL;
+
+ if (persistent) {
+ if (persistent < 0) persistent = (kh.flags&KMEM_FLAG_REUSED_PERSISTENT)?1:0;
+ else if ((kh.flags&KMEM_FLAG_REUSED_PERSISTENT) == 0) err = PCILIB_ERROR_INVALID_STATE;
+ } else if (kh.flags&KMEM_FLAG_REUSED_PERSISTENT) err = PCILIB_ERROR_INVALID_STATE;
+
+ if (hardware) {
+ if (hardware < 0) hardware = (kh.flags&KMEM_FLAG_REUSED_HW)?1:0;
+ else if ((kh.flags&KMEM_FLAG_REUSED_HW) == 0) err = PCILIB_ERROR_INVALID_STATE;
+ } else if (kh.flags&KMEM_FLAG_REUSED_HW) err = PCILIB_ERROR_INVALID_STATE;
+
+ if (err) {
+ kbuf->buf.n_blocks = i + 1;
+ break;
+ }
+ } else {
+ if (!i) reused = PCILIB_TRISTATE_NO;
+ else if (reused) reused = PCILIB_TRISTATE_PARTIAL;
}
+
if ((alignment)&&(type != PCILIB_KMEM_TYPE_PAGE)) {
if (kh.pa % alignment) kbuf->buf.blocks[i].alignment_offset = alignment - kh.pa % alignment;
@@ -83,21 +104,52 @@ pcilib_kmem_handle_t *pcilib_alloc_kernel_memory(pcilib_t *ctx, pcilib_kmem_type
addr = mmap( 0, kh.size + kbuf->buf.blocks[i].alignment_offset, PROT_WRITE | PROT_READ, MAP_SHARED, ctx->handle, 0 );
if ((!addr)||(addr == MAP_FAILED)) {
kbuf->buf.n_blocks = i + 1;
- pcilib_free_kernel_memory(ctx, kbuf, 0);
- pcilib_error("Failed to mmap allocated kernel memory");
- return NULL;
+ error = "Failed to mmap allocated kernel memory";
+ break;
}
kbuf->buf.blocks[i].ua = addr;
kbuf->buf.blocks[i].mmap_offset = kh.pa & ctx->page_mask;
}
+
+ if (persistent) {
+ if (persistent < 0) persistent = 0;
+ else if ((flags&PCILIB_KMEM_FLAG_PERSISTENT) == 0) err = PCILIB_ERROR_INVALID_STATE;
+ }
+
+ if (hardware) {
+ if (hardware < 0) hardware = 0;
+ else if ((flags&PCILIB_KMEM_FLAG_HARDWARE) == 0) err = PCILIB_ERROR_INVALID_STATE;
+ }
+
+ if (err||error) {
+ pcilib_kmem_flags_t free_flags = 0;
+
+ if ((!persistent)&&(flags&PCILIB_KMEM_FLAG_PERSISTENT)) {
+ // What if only the last buffer is persistent? Ignore?
+ free_flags |= PCILIB_KMEM_FLAG_PERSISTENT;
+ }
+
+ if ((!hardware)&&(flags&PCILIB_KMEM_FLAG_HARDWARE)) {
+ // What if only the last buffer holds a HW reference? Ignore?
+ free_flags |= PCILIB_KMEM_FLAG_HARDWARE;
+ }
+
+ pcilib_free_kernel_memory(ctx, kbuf, free_flags);
+
+ if (err) error = "Reused buffers are inconsistent";
+ pcilib_error(error);
+
+ return NULL;
+ }
+
if (nmemb == 1) {
memcpy(&kbuf->buf.addr, &kbuf->buf.blocks[0], sizeof(pcilib_kmem_addr_t));
}
- kbuf->buf.reuse = reuse;
+ kbuf->buf.reused = reused|(persistent?PCILIB_KMEM_REUSE_PERSISTENT:0)|(hardware?PCILIB_KMEM_REUSE_HARDWARE:0);
kbuf->buf.n_blocks = nmemb;
kbuf->prev = NULL;
@@ -124,7 +176,7 @@ void pcilib_free_kernel_memory(pcilib_t *ctx, pcilib_kmem_handle_t *k, pcilib_km
kh.handle_id = kbuf->buf.blocks[i].handle_id;
kh.pa = kbuf->buf.blocks[i].pa;
- kh.reuse = ((flags&PCILIB_KMEM_FLAG_REUSE)||(kbuf->buf.blocks[i].reused));
+ kh.flags = flags;
ret = ioctl(ctx->handle, PCIDRIVER_IOC_KMEM_FREE, &kh);
if ((ret)&&(!err)) err = ret;
}
@@ -136,6 +188,7 @@ void pcilib_free_kernel_memory(pcilib_t *ctx, pcilib_kmem_handle_t *k, pcilib_km
}
}
+
int pcilib_sync_kernel_memory(pcilib_t *ctx, pcilib_kmem_handle_t *k, pcilib_kmem_sync_direction_t dir) {
int i;
int ret;
@@ -187,7 +240,8 @@ size_t pcilib_kmem_get_block_size(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t
return kbuf->buf.blocks[block].size;
}
-pcilib_kmem_reuse_t pcilib_kmem_is_reused(pcilib_t *ctx, pcilib_kmem_handle_t *k) {
+pcilib_kmem_reuse_state_t pcilib_kmem_is_reused(pcilib_t *ctx, pcilib_kmem_handle_t *k) {
pcilib_kmem_list_t *kbuf = (pcilib_kmem_list_t*)k;
- return kbuf->buf.reuse;
+ return kbuf->buf.reused;
}
+
diff --git a/kmem.h b/kmem.h
index b09424c..42a2fac 100644
--- a/kmem.h
+++ b/kmem.h
@@ -4,15 +4,21 @@
#include "pcilib.h"
typedef enum {
- PCILIB_KMEM_FLAG_REUSE = 1, /**< Try to reuse buffers on alloc and only unmap non-reused buffers on free (reused are freed only if FORCE flag is specified) */
+ PCILIB_KMEM_FLAG_REUSE = KMEM_FLAG_REUSE,
+ PCILIB_KMEM_FLAG_EXCLUSIVE = KMEM_FLAG_EXCLUSIVE,
+ PCILIB_KMEM_FLAG_PERSISTENT = KMEM_FLAG_PERSISTENT,
+ PCILIB_KMEM_FLAG_HARDWARE = KMEM_FLAG_HW
// PCILIB_KMEM_FLAG_FORCE = 2 /**< Force buffer
} pcilib_kmem_flags_t;
typedef enum {
- PCILIB_KMEM_REUSE_ALLOCATED = 0,
- PCILIB_KMEM_REUSE_PARTIAL = 1,
- PCILIB_KMEM_REUSE_REUSED = 2
-} pcilib_kmem_reuse_t;
+ PCILIB_KMEM_REUSE_REUSED = PCILIB_TRISTATE_YES,
+ PCILIB_KMEM_REUSE_ALLOCATED = PCILIB_TRISTATE_NO,
+ PCILIB_KMEM_REUSE_PARTIAL = PCILIB_TRISTATE_PARTIAL,
+ PCILIB_KMEM_REUSE_PERSISTENT = 0x100,
+ PCILIB_KMEM_REUSE_HARDWARE = 0x200
+} pcilib_kmem_reuse_state_t;
+
typedef struct {
int handle_id;
@@ -33,7 +39,8 @@ typedef struct {
*/
typedef struct {
pcilib_kmem_addr_t addr;
- pcilib_kmem_reuse_t reuse;
+
+ pcilib_kmem_reuse_state_t reused;
size_t n_blocks;
pcilib_kmem_addr_t blocks[];
@@ -57,6 +64,6 @@ uintptr_t pcilib_kmem_get_pa(pcilib_t *ctx, pcilib_kmem_handle_t *k);
void *pcilib_kmem_get_block_ua(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block);
uintptr_t pcilib_kmem_get_block_pa(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block);
size_t pcilib_kmem_get_block_size(pcilib_t *ctx, pcilib_kmem_handle_t *k, size_t block);
-pcilib_kmem_reuse_t pcilib_kmem_is_reused(pcilib_t *ctx, pcilib_kmem_handle_t *k);
+pcilib_kmem_reuse_state_t pcilib_kmem_is_reused(pcilib_t *ctx, pcilib_kmem_handle_t *k);
#endif /* _PCILIB_KMEM_H */
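
A hedged sketch, not part of this commit, of interpreting the combined
pcilib_kmem_reuse_state_t value: the reuse tristate sits in the low bits and
the PERSISTENT/HARDWARE bits are OR-ed on top of it (assuming the usual pcilib
include chain behind kmem.h).

#include <stdio.h>
#include "kmem.h"

static void print_reuse_state(pcilib_kmem_reuse_state_t state) {
    int base = state & ~(PCILIB_KMEM_REUSE_PERSISTENT | PCILIB_KMEM_REUSE_HARDWARE);

    if (base == PCILIB_KMEM_REUSE_REUSED)       puts("all blocks were reused");
    else if (base == PCILIB_KMEM_REUSE_PARTIAL) puts("only some blocks were reused");
    else                                        puts("all blocks were freshly allocated");

    if (state & PCILIB_KMEM_REUSE_PERSISTENT) puts("buffer was already persistent");
    if (state & PCILIB_KMEM_REUSE_HARDWARE)   puts("a hardware reference was already held");
}

int main(void) {
    print_reuse_state(PCILIB_KMEM_REUSE_REUSED | PCILIB_KMEM_REUSE_HARDWARE);
    return 0;
}
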
diff --git a/pcilib.h b/pcilib.h
index 42be921..01afe9e 100644
--- a/pcilib.h
+++ b/pcilib.h
@@ -72,7 +72,7 @@ typedef enum {
PCILIB_DMA_FLAGS_DEFAULT = 0,
PCILIB_DMA_FLAG_EOP = 1,
PCILIB_DMA_FLAG_WAIT = 2,
- PCILIB_DMA_FLAG_PERMANENT = 4
+ PCILIB_DMA_FLAG_PERSISTENT = 4
} pcilib_dma_flags_t;
typedef enum {
diff --git a/tools.h b/tools.h
index 121f1a6..e18a58c 100644
--- a/tools.h
+++ b/tools.h
@@ -10,6 +10,13 @@
#define min2(a, b) (((a)<(b))?(a):(b))
+typedef enum {
+ PCILIB_TRISTATE_NO = 0,
+ PCILIB_TRISTATE_PARTIAL = 1,
+ PCILIB_TRISTATE_YES = 2
+} pcilib_tristate_t;
+
+
int pcilib_isnumber(const char *str);
int pcilib_isxnumber(const char *str);
int pcilib_isnumber_n(const char *str, size_t len);