@@ -498,7 +498,7 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
  * To be called with the counter disabled in hw:
  */
 static void
-__hw_perf_counter_set_period(struct perf_counter *counter,
+x86_perf_counter_set_period(struct perf_counter *counter,
 			     struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
@@ -642,7 +642,7 @@ static int x86_pmu_enable(struct perf_counter *counter)
 	 */
 	barrier();
 
-	__hw_perf_counter_set_period(counter, hwc, idx);
+	x86_perf_counter_set_period(counter, hwc, idx);
 	__x86_pmu_enable(counter, hwc, idx);
 
 	return 0;
@@ -731,7 +731,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	int idx = hwc->idx;
 
 	x86_perf_counter_update(counter, hwc, idx);
-	__hw_perf_counter_set_period(counter, hwc, idx);
+	x86_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		__x86_pmu_enable(counter, hwc, idx);