Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug         |  27
-rw-r--r--  lib/decompress_inflate.c  |   2
-rw-r--r--  lib/dma-debug.c           | 257
-rw-r--r--  lib/genalloc.c            |  23
4 files changed, 288 insertions(+), 21 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 74fdc5cf4adc..60b7b9bd81e8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -162,6 +162,16 @@ config DEBUG_KERNEL
Say Y here if you are developing drivers or trying to debug and
identify kernel problems.
+config LESS_GCC_OPT
+ bool "Compile with -O1 for debugging"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ Enable this if you are using a source level debugger and
+ experience poor correlation between the program counter
+ and the source display. This will reduce kernel performance
+ and should only be used when debugging.
+
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
depends on DEBUG_KERNEL && GENERIC_HARDIRQS
@@ -191,15 +201,27 @@ config LOCKUP_DETECTOR
The overhead should be minimal. A periodic hrtimer runs to
generate interrupts and kick the watchdog task every 4 seconds.
An NMI is generated every 10 seconds or so to check for hardlockups.
+ If NMIs are not available on the platform, the hrtimer interrupt on
+ one CPU is used every 12 seconds to check for hardlockups on the
+ next CPU.
The frequency of hrtimer and NMI events and the soft and hard lockup
thresholds can be controlled through the sysctl watchdog_thresh.
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
def_bool y
depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+ def_bool y
+ depends on LOCKUP_DETECTOR && SMP
+ depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+ def_bool y
+ depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
@@ -669,8 +691,9 @@ config DEBUG_LOCKING_API_SELFTESTS
mutexes and rwsems.
config STACKTRACE
- bool
+ bool "Stacktrace"
depends on STACKTRACE_SUPPORT
+ default y
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 19ff89e34eec..d619b28c456f 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -48,7 +48,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
- out_len = 0x7fffffff; /* no limit */
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d87a17a819d0..6f01db6fc567 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -86,6 +86,15 @@ static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */
static u32 global_disable __read_mostly;
+/*
+ * The global_disable flag can be set in kernel command line, and
+ * dma_debug_init() is called much later than that. Mappings can happen before
+ * the init function is called, and thus no memory has been reserved for the
+ * entries. This out-of-memory situation would set the global_disable flag,
+ * after which debugging could never be enabled again. Use a separate flag
+ * to skip the tracking until init has been done.
+ */
+static bool initialized __read_mostly;
/* Global error count */
static u32 error_count;
@@ -133,6 +142,24 @@ static const char *type2name[4] = { "single", "page",
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
+/* dma statistics per device */
+struct dma_dev_info {
+ struct list_head list;
+ struct device *dev;
+ spinlock_t lock; /* Protects dma_dev_info itself */
+
+ int current_allocs;
+ int total_allocs;
+ int max_allocs;
+
+ int current_alloc_size;
+ int total_alloc_size;
+ int max_alloc_size;
+};
+
+static LIST_HEAD(dev_info_list);
+static DEFINE_SPINLOCK(dev_info_lock); /* Protects dev_info_list */
+
/*
* The access to some variables in this macro is racy. We can't use atomic_t
* here because all these variables are exported to debugfs. Some of them even
@@ -404,6 +431,79 @@ void debug_dma_dump_mappings(struct device *dev)
EXPORT_SYMBOL(debug_dma_dump_mappings);
/*
+ * device info snapshot updating functions
+ */
+static void ____dev_info_incr(struct dma_dev_info *info,
+ struct dma_debug_entry *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ info->current_allocs++;
+ info->total_allocs++;
+ if (info->current_allocs > info->max_allocs)
+ info->max_allocs = info->current_allocs;
+
+ info->current_alloc_size += entry->size;
+ info->total_alloc_size += entry->size;
+ if (info->current_alloc_size > info->max_alloc_size)
+ info->max_alloc_size = info->current_alloc_size;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void ____dev_info_decr(struct dma_dev_info *info,
+ struct dma_debug_entry *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ info->current_allocs--;
+ info->current_alloc_size -= entry->size;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void __dev_info_fn(struct dma_debug_entry *entry,
+ void (*fn)(struct dma_dev_info *, struct dma_debug_entry *))
+{
+ struct dma_dev_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_info_lock, flags);
+
+ list_for_each_entry(info, &dev_info_list, list)
+ if (info->dev == entry->dev)
+ goto found;
+
+ info = kzalloc(sizeof(*info), GFP_ATOMIC); /* dev_info_lock held, IRQs off */
+ if (!info) {
+ dev_err(entry->dev, "Out of memory at %s\n", __func__);
+ spin_unlock_irqrestore(&dev_info_lock, flags);
+ return;
+ }
+
+ spin_lock_init(&info->lock);
+ info->dev = entry->dev;
+ list_add(&info->list, &dev_info_list);
+found:
+ spin_unlock_irqrestore(&dev_info_lock, flags);
+ fn(info, entry);
+}
+
+static inline void dev_info_alloc(struct dma_debug_entry *entry)
+{
+ __dev_info_fn(entry, ____dev_info_incr);
+}
+
+static inline void dev_info_free(struct dma_debug_entry *entry)
+{
+ __dev_info_fn(entry, ____dev_info_decr);
+}
+
+/*
* Wrapper function for adding an entry to the hash.
* This function takes care of locking itself.
*/
@@ -415,6 +515,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
bucket = get_hash_bucket(entry, &flags);
hash_bucket_add(bucket, entry);
put_hash_bucket(bucket, &flags);
+
+ dev_info_alloc(entry);
}
static struct dma_debug_entry *__dma_entry_alloc(void)
@@ -650,6 +752,125 @@ out_unlock:
return count;
}
+static inline void seq_print_ip_sym(struct seq_file *s, unsigned long ip)
+{
+ seq_printf(s, "[<%p>] %pS\n", (void *)ip, (void *)ip);
+}
+
+void seq_print_trace(struct seq_file *s, struct stack_trace *trace)
+{
+ int i;
+
+ if (WARN_ON(!trace->entries))
+ return;
+
+ for (i = trace->skip; i < trace->nr_entries; i++)
+ seq_print_ip_sym(s, trace->entries[i]);
+}
+
+/*
+ * Print all map entries just in the order they are stored. We assume that the
+ * user will be able to parse this later anyway. Detailed output includes stack
+ * traces of allocations.
+ */
+void seq_print_dma_mappings(struct seq_file *s, int detail)
+{
+ int idx;
+
+ for (idx = 0; idx < HASH_SIZE; idx++) {
+ struct hash_bucket *bucket = &dma_entry_hash[idx];
+ struct dma_debug_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bucket->lock, flags);
+
+ list_for_each_entry(entry, &bucket->list, list) {
+ seq_printf(s,
+ " %s %s idx %d P=%llx D=%llx L=%llx %s A=%s\n",
+ dev_name(entry->dev),
+ type2name[entry->type], idx,
+ (u64)entry->paddr,
+ entry->dev_addr, entry->size,
+ dir2name[entry->direction],
+ debug_dma_platformdata(entry->dev));
+
+ if (detail)
+ seq_print_trace(s, &entry->stacktrace);
+ }
+
+ spin_unlock_irqrestore(&bucket->lock, flags);
+ }
+}
+
+void __weak dma_debugfs_platform_info(struct dentry *dent)
+{
+}
+
+static int _dump_allocs(struct seq_file *s, void *data)
+{
+ int detail = (int)s->private;
+
+ seq_print_dma_mappings(s, detail);
+ return 0;
+}
+
+static int _dump_dev_info(struct seq_file *s, void *data)
+{
+ struct dma_dev_info *i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_info_lock, flags);
+
+ list_for_each_entry(i, &dev_info_list, list)
+ seq_printf(s,
+ "dev=%s curallocs=%d totallocs=%d maxallocs=%d cursize=%d totsize=%d maxsize=%d\n",
+ dev_name(i->dev), i->current_allocs, i->total_allocs,
+ i->max_allocs, i->current_alloc_size,
+ i->total_alloc_size, i->max_alloc_size);
+
+ spin_unlock_irqrestore(&dev_info_lock, flags);
+ return 0;
+}
+
+#define DEFINE_DEBUGFS(__name, __func, __data) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __func, __data); \
+} \
+static const struct file_operations __name ## _fops = { \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+DEFINE_DEBUGFS(_dump_allocs, _dump_allocs, NULL);
+DEFINE_DEBUGFS(_dump_allocs_detail, _dump_allocs, (void *)1);
+DEFINE_DEBUGFS(_dump_dev_info, _dump_dev_info, NULL);
+#undef DEFINE_DEBUGFS
+
+static int map_dump_debug_fs_init(void)
+{
+#define CREATE_FILE(name) \
+ debugfs_create_file(#name, S_IRUGO, \
+ dma_debug_dent, NULL, \
+ &_##name##_fops)
+
+ if (!CREATE_FILE(dump_allocs))
+ return -ENOMEM;
+
+ if (!CREATE_FILE(dump_allocs_detail))
+ return -ENOMEM;
+
+ if (!CREATE_FILE(dump_dev_info))
+ return -ENOMEM;
+
+#undef CREATE_FILE
+
+ dma_debugfs_platform_info(dma_debug_dent);
+ return 0;
+}
+
static const struct file_operations filter_fops = {
.read = filter_read,
.write = filter_write,
@@ -704,6 +925,9 @@ static int dma_debug_fs_init(void)
if (!filter_dent)
goto out_err;
+ if (map_dump_debug_fs_init())
+ goto out_err;
+
return 0;
out_err:
@@ -742,7 +966,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
struct dma_debug_entry *uninitialized_var(entry);
int count;
- if (global_disable)
+ if (!initialized || global_disable)
return 0;
switch (action) {
@@ -770,7 +994,7 @@ void dma_debug_add_bus(struct bus_type *bus)
{
struct notifier_block *nb;
- if (global_disable)
+ if (!initialized || global_disable)
return;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
@@ -819,6 +1043,7 @@ void dma_debug_init(u32 num_entries)
nr_total_entries = num_free_entries;
pr_info("DMA-API: debugging enabled by kernel config\n");
+ initialized = true;
}
static __init int dma_debug_cmdline(char *str)
@@ -937,6 +1162,8 @@ static void check_unmap(struct dma_debug_entry *ref)
type2name[entry->type]);
}
+ dev_info_free(entry);
+
hash_bucket_del(entry);
dma_entry_free(entry);
@@ -1040,7 +1267,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
{
struct dma_debug_entry *entry;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (dma_mapping_error(dev, dma_addr))
@@ -1079,7 +1306,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct hash_bucket *bucket;
unsigned long flags;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.dev = dev;
@@ -1121,7 +1348,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
.direction = direction,
};
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (map_single)
@@ -1138,7 +1365,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, mapped_ents, i) {
@@ -1190,7 +1417,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sglist, s, nelems, i) {
@@ -1221,7 +1448,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
{
struct dma_debug_entry *entry;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
if (unlikely(virt == NULL))
@@ -1254,7 +1481,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
.direction = DMA_BIDIRECTIONAL,
};
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
check_unmap(&ref);
@@ -1266,7 +1493,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1286,7 +1513,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1307,7 +1534,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1328,7 +1555,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
{
struct dma_debug_entry ref;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
ref.type = dma_debug_single;
@@ -1348,7 +1575,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, nelems, i) {
@@ -1380,7 +1607,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
struct scatterlist *s;
int mapped_ents = 0, i;
- if (unlikely(global_disable))
+ if (unlikely(!initialized || global_disable))
return;
for_each_sg(sg, s, nelems, i) {
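
To make the counters reported by the new dump_dev_info debugfs file easier to read, here is a minimal userspace model of the accounting kept in struct dma_dev_info above (no locking, no device list; the sizes are invented): current_* track what is currently mapped, total_* only ever grow, and max_* record the high-water marks.

#include <stdio.h>

struct dma_dev_info {
	int current_allocs, total_allocs, max_allocs;
	int current_alloc_size, total_alloc_size, max_alloc_size;
};

static void dev_info_incr(struct dma_dev_info *info, int size)
{
	info->current_allocs++;
	info->total_allocs++;
	if (info->current_allocs > info->max_allocs)
		info->max_allocs = info->current_allocs;

	info->current_alloc_size += size;
	info->total_alloc_size += size;
	if (info->current_alloc_size > info->max_alloc_size)
		info->max_alloc_size = info->current_alloc_size;
}

static void dev_info_decr(struct dma_dev_info *info, int size)
{
	info->current_allocs--;
	info->current_alloc_size -= size;
}

int main(void)
{
	struct dma_dev_info info = { 0 };

	dev_info_incr(&info, 4096);	/* map A */
	dev_info_incr(&info, 8192);	/* map B */
	dev_info_decr(&info, 4096);	/* unmap A */

	printf("curallocs=%d totallocs=%d maxallocs=%d cursize=%d totsize=%d maxsize=%d\n",
	       info.current_allocs, info.total_allocs, info.max_allocs,
	       info.current_alloc_size, info.total_alloc_size,
	       info.max_alloc_size);
	return 0;
}
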
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 2a39bf62d8c1..2fb283223314 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -259,21 +259,24 @@ void gen_pool_destroy(struct gen_pool *pool)
EXPORT_SYMBOL(gen_pool_destroy);
/**
- * gen_pool_alloc - allocate special memory from the pool
+ * gen_pool_alloc_addr - allocate special memory from the pool
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
+ * @alloc_addr: if non-zero, allocate starting at alloc_addr; the address
+ *              must be aligned to the pool's minimum allocation size.
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+unsigned long gen_pool_alloc_addr(struct gen_pool *pool, size_t size,
+ unsigned long alloc_addr)
{
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
int nbits, start_bit = 0, end_bit, remain;
+ int alloc_bit_needed = 0;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -282,6 +285,9 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
if (size == 0)
return 0;
+ if (alloc_addr & (1 << order) - 1)
+ return 0;
+
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
@@ -289,9 +295,20 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
continue;
end_bit = chunk_size(chunk) >> order;
+ if (alloc_addr) {
+ if (alloc_addr < chunk->start_addr ||
+ alloc_addr >= chunk->end_addr)
+ continue;
+ if (alloc_addr + size > chunk->end_addr)
+ return 0;
+ alloc_bit_needed = start_bit =
+ (alloc_addr - chunk->start_addr) >> order;
+ }
retry:
start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
pool->data);
+ if (alloc_addr && alloc_bit_needed != start_bit)
+ return 0;
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +327,7 @@ retry:
rcu_read_unlock();
return addr;
}
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_addr);
/**
* gen_pool_free - free allocated special memory back to the pool
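
As an illustration of the fixed-address path added to gen_pool_alloc_addr(), a small userspace sketch of how a requested alloc_addr maps onto the chunk bitmap (chunk bounds, order and addresses are made up): the address must be aligned to the pool's minimum allocation size, must fall entirely inside one chunk, and then pins the bitmap search to exactly one start bit.

#include <stdio.h>

int main(void)
{
	unsigned long chunk_start = 0x40000000;	/* hypothetical chunk */
	unsigned long chunk_end   = 0x40100000;	/* 1 MiB chunk */
	int order = 12;				/* min_alloc_order: 4 KiB units */

	unsigned long alloc_addr = 0x40004000;	/* requested fixed address */
	unsigned long size = 0x8000;		/* 32 KiB */

	/* must be aligned to the pool's minimum allocation size */
	if (alloc_addr & ((1UL << order) - 1)) {
		printf("unaligned request\n");
		return 1;
	}
	/* must lie completely inside one chunk */
	if (alloc_addr < chunk_start || alloc_addr + size > chunk_end) {
		printf("outside chunk\n");
		return 1;
	}

	unsigned long alloc_bit_needed = (alloc_addr - chunk_start) >> order;
	unsigned long nbits = (size + (1UL << order) - 1) >> order;

	printf("allocation must occupy bits %lu..%lu of the chunk bitmap\n",
	       alloc_bit_needed, alloc_bit_needed + nbits - 1);
	return 0;
}
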