/*
* Architecture-provided APIs - weak aliases:
*/
-
-int __weak hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
{
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-void __weak hw_perf_counter_enable(struct perf_counter *counter) { }
-void __weak hw_perf_counter_disable(struct perf_counter *counter) { }
-void __weak hw_perf_counter_read(struct perf_counter *counter) { }
-void __weak hw_perf_disable_all(void) { }
-void __weak hw_perf_enable_all(void) { }
-void __weak hw_perf_counter_setup(void) { }
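+/*
+ * hw_perf_save_disable() returns an opaque control value that the
+ * matching hw_perf_restore() call takes back; these weak NOP
+ * versions are what you get on architectures without hardware
+ * counter support.
+ */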
+u64 __weak hw_perf_save_disable(void) { return 0; }
+void __weak hw_perf_restore(u64 ctrl) { }
+void __weak hw_perf_counter_setup(void) { }
#if BITS_PER_LONG == 64
return (u64) atomic64_read(&counter->count);
}
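+/* 64-bit: the counter value fits in a single atomic64_t. */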
+void atomic64_counter_set(struct perf_counter *counter, u64 val)
+{
+ atomic64_set(&counter->count, val);
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+ return atomic64_read(&counter->count);
+}
+
#else
/*
return cntl | ((u64) cnth) << 32;
}
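+/*
+ * 32-bit: the 64-bit value is split across two atomic_t words,
+ * count32[0] = low half, count32[1] = high half. The two stores
+ * below are not atomic as a unit.
+ */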
+void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+{
+ atomic_set(counter->count32 + 0, (u32) val64);
+ atomic_set(counter->count32 + 1, (u32)(val64 >> 32));
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+ return (u32) atomic_read(counter->count32 + 0) |
+ (u64) atomic_read(counter->count32 + 1) << 32;
+}
+
#endif
static void
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
+ u64 perf_flags;
/*
* If this is a task context, we need to check whether it is
spin_lock(&ctx->lock);
if (counter->active) {
- hw_perf_counter_disable(counter);
+ counter->hw_ops->hw_perf_counter_disable(counter);
counter->active = 0;
ctx->nr_active--;
cpuctx->active_oncpu--;
* Protect the list operation against NMI by disabling the
* counters on a global level. NOP for non-NMI-based counters.
*/
- hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
list_del_counter(counter, ctx);
- hw_perf_enable_all();
+ hw_perf_restore(perf_flags);
if (!ctx->task) {
/*
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
int cpu = smp_processor_id();
+ u64 perf_flags;
/*
* If this is a task context, we need to check whether it is
* Protect the list operation against NMI by disabling the
* counters on a global level. NOP for non-NMI-based counters.
*/
- hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
list_add_counter(counter, ctx);
- hw_perf_enable_all();
+ hw_perf_restore(perf_flags);
ctx->nr_counters++;
if (cpuctx->active_oncpu < perf_max_counters) {
- hw_perf_counter_enable(counter);
+ counter->hw_ops->hw_perf_counter_enable(counter);
counter->active = 1;
counter->oncpu = cpu;
ctx->nr_active++;
if (!counter->active)
return;
- hw_perf_counter_disable(counter);
+ counter->hw_ops->hw_perf_counter_disable(counter);
counter->active = 0;
counter->oncpu = -1;
struct perf_counter_context *ctx,
int cpu)
{
- hw_perf_counter_enable(counter);
+ counter->hw_ops->hw_perf_counter_enable(counter);
counter->active = 1;
counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
{
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
struct perf_counter *counter;
+ u64 perf_flags;
if (likely(!ctx->nr_counters))
return;
/*
* Rotate the first entry last (works just fine for group counters too):
*/
- hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
list_del(&counter->list_entry);
list_add_tail(&counter->list_entry, &ctx->counter_list);
break;
}
- hw_perf_enable_all();
+ hw_perf_restore(perf_flags);
spin_unlock(&ctx->lock);
*/
static void __hw_perf_counter_read(void *info)
{
- hw_perf_counter_read(info);
+ struct perf_counter *counter = info;
+
+ counter->hw_ops->hw_perf_counter_read(counter);
}
static u64 perf_counter_read(struct perf_counter *counter)
.poll = perf_poll,
};
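+/*
+ * Software counter: cpu clock. The enable/disable methods are empty
+ * because the value is recomputed from cpu_clock() on every read.
+ */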
+static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+{
+ int cpu = raw_smp_processor_id();
+
+ atomic64_counter_set(counter, cpu_clock(cpu));
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+ .hw_perf_counter_enable = cpu_clock_perf_counter_enable,
+ .hw_perf_counter_disable = cpu_clock_perf_counter_disable,
+ .hw_perf_counter_read = cpu_clock_perf_counter_read,
+};
+
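+/*
+ * Generic software counters are demultiplexed here by event type;
+ * only PERF_COUNT_CPU_CLOCK is wired up so far.
+ */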
+static const struct hw_perf_counter_ops *
+sw_perf_counter_init(struct perf_counter *counter)
+{
+ const struct hw_perf_counter_ops *hw_ops = NULL;
+
+ switch (counter->hw_event.type) {
+ case PERF_COUNT_CPU_CLOCK:
+ hw_ops = &perf_ops_cpu_clock;
+ break;
+ default:
+ break;
+ }
+ return hw_ops;
+}
+
/*
* Allocate and initialize a counter structure
*/
int cpu,
struct perf_counter *group_leader)
{
- struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ const struct hw_perf_counter_ops *hw_ops;
+ struct perf_counter *counter;
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return NULL;
counter->hw_event = *hw_event;
counter->wakeup_pending = 0;
counter->group_leader = group_leader;
+ counter->hw_ops = NULL;
+
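+ /*
+ * Negative, non-raw event types denote generic software counters;
+ * anything else (or anything the software path declines) is
+ * offered to the architecture code.
+ */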
+ hw_ops = NULL;
+ if (!hw_event->raw && hw_event->type < 0)
+ hw_ops = sw_perf_counter_init(counter);
+ if (!hw_ops)
+ hw_ops = hw_perf_counter_init(counter);
+
+ if (!hw_ops || IS_ERR(hw_ops)) {
+ kfree(counter);
+ return NULL;
+ }
+ counter->hw_ops = hw_ops;
return counter;
}
goto err_put_context;
}
- ret = -ENOMEM;
+ ret = -EINVAL;
counter = perf_counter_alloc(&hw_event, cpu, group_leader);
if (!counter)
goto err_put_context;
- ret = hw_perf_counter_init(counter);
- if (ret)
- goto err_free_put_context;
-
perf_install_in_context(ctx, counter, cpu);
ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
mutex_lock(&counter->mutex);
perf_counter_remove_from_context(counter);
mutex_unlock(&counter->mutex);
-
-err_free_put_context:
kfree(counter);
err_put_context: