Diffstat (limited to 'lib')
-rw-r--r--	lib/dma-debug.c	40
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d87a17a819d0..cc42171ff462 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -86,6 +86,15 @@ static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly;
+/*
+ * The global_disable flag can be set on the kernel command line, and
+ * dma_debug_init() is called much later than that. Mappings can happen
+ * before the init function runs, and thus no memory has been reserved
+ * for the entries. That out-of-memory situation sets the global_disable
+ * flag, after which debugging can never be enabled again. Use a separate
+ * flag to skip the tracking if init hasn't been done yet.
+ */
+static bool initialized __read_mostly;
/* Global error count */
static u32 error_count;
@@ -742,7 +751,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
struct dma_debug_entry *uninitialized_var(entry);
int count;
- if (global_disable)
+ if (!initialized || global_disable)
return 0;
switch (action) {
@@ -770,7 +779,7 @@ void dma_debug_add_bus(struct bus_type *bus)
{
struct notifier_block *nb;
- if (global_disable)
+ if (!initialized || global_disable)
return;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
@@ -819,6 +828,7 @@ void dma_debug_init(u32 num_entries)
nr_total_entries = num_free_entries;
pr_info("DMA-API: debugging enabled by kernel config\n");
+ initialized = true;
}
static __init int dma_debug_cmdline(char *str)
@@ -1040,7 +1050,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
{
struct dma_debug_entry *entry;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (dma_mapping_error(dev, dma_addr))
@@ -1079,7 +1089,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct hash_bucket *bucket;
unsigned long flags;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.dev = dev;
@@ -1121,7 +1131,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
.direction = direction,
};
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (map_single)
@@ -1138,7 +1148,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, mapped_ents, i) {
@@ -1190,7 +1200,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sglist, s, nelems, i) {
@@ -1221,7 +1231,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
{
struct dma_debug_entry *entry;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (unlikely(virt == NULL))
@@ -1254,7 +1264,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
.direction = DMA_BIDIRECTIONAL,
};
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
check_unmap(&ref);
@@ -1266,7 +1276,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1286,7 +1296,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1307,7 +1317,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1328,7 +1338,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1348,7 +1358,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, nelems, i) {
@@ -1380,7 +1390,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, nelems, i) {
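
The pattern the patch applies is generic: a one-way kill switch must not be tripped just because a caller arrives before initialization, so a second flag short-circuits early callers instead. Below is a minimal, self-contained user-space sketch of that pattern; it is not part of the patch, and the names init_done, feature_disabled and do_work are made up for illustration.

/*
 * Sketch of the guard pattern used above (illustration only, not
 * kernel code; all names are hypothetical). A one-way "disabled"
 * flag must not be set merely because a caller arrives before
 * initialization, so a separate "initialized" flag short-circuits
 * early callers instead.
 */
#include <stdbool.h>
#include <stdio.h>

static bool init_done;          /* set once, when setup has run */
static bool feature_disabled;   /* one-way kill switch for real errors */

static void do_work(const char *who)
{
	/* Skip quietly if we are too early or permanently disabled. */
	if (!init_done || feature_disabled) {
		printf("%s: skipped (init_done=%d, disabled=%d)\n",
		       who, init_done, feature_disabled);
		return;
	}
	printf("%s: tracked\n", who);
}

static void init(void)
{
	/* Resources for the tracking would be allocated here. */
	init_done = true;
}

int main(void)
{
	do_work("early caller");   /* before init: skipped, not fatal */
	init();
	do_work("normal caller");  /* after init: tracked */
	return 0;
}

Without the init_done check, the early caller would look like an allocation failure and would set feature_disabled forever, which is exactly the failure mode the comment in the patch describes for global_disable.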