@@ -291,20 +291,36 @@ static u64 get_ibs_op_count(u64 config)
 
 static void
 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
-		      u64 config)
+		      u64 *config)
 {
-	u64 count = perf_ibs->get_count(config);
+	u64 count = perf_ibs->get_count(*config);
 
 	while (!perf_event_try_update(event, count, 20)) {
-		rdmsrl(event->hw.config_base, config);
-		count = perf_ibs->get_count(config);
+		rdmsrl(event->hw.config_base, *config);
+		count = perf_ibs->get_count(*config);
 	}
 }
 
-/* Note: The enable mask must be encoded in the config argument. */
-static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
+static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
+					 struct hw_perf_event *hwc, u64 config)
 {
-	wrmsrl(hwc->config_base, hwc->config | config);
+	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
+}
+
+/*
+ * Erratum #420 Instruction-Based Sampling Engine May Generate
+ * Interrupt that Cannot Be Cleared:
+ *
+ * Must clear counter mask first, then clear the enable bit. See
+ * Revision Guide for AMD Family 10h Processors, Publication #41322.
+ */
+static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
+					  struct hw_perf_event *hwc, u64 config)
+{
+	config &= ~perf_ibs->cnt_mask;
+	wrmsrl(hwc->config_base, config);
+	config &= ~perf_ibs->enable_mask;
+	wrmsrl(hwc->config_base, config);
 }
 
 /*
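
The ordering inside the new perf_ibs_disable_event() is the whole point of the workaround: the counter field has to be cleared in one MSR write before the enable bit is cleared in a second write, otherwise the erratum can leave an interrupt pending that cannot be cleared. Below is a minimal user-space sketch of that two-write sequence, with the MSR modeled as a plain variable and the masks as hypothetical stand-ins for perf_ibs->cnt_mask and perf_ibs->enable_mask:

    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_CNT_MASK    0x000000000000ffffULL  /* stand-in for perf_ibs->cnt_mask */
    #define FAKE_ENABLE_MASK 0x0000000000020000ULL  /* stand-in for perf_ibs->enable_mask */

    static uint64_t fake_msr;                       /* models the IBS control MSR */

    static void fake_wrmsrl(uint64_t val)
    {
        fake_msr = val;                             /* one call = one MSR write */
        printf("wrmsrl: %#018llx\n", (unsigned long long)val);
    }

    int main(void)
    {
        uint64_t config = FAKE_ENABLE_MASK | 0x1234; /* enabled, nonzero count */

        /* Write 1: zero the counter field while the engine is still enabled. */
        config &= ~FAKE_CNT_MASK;
        fake_wrmsrl(config);

        /* Write 2: only now drop the enable bit. */
        config &= ~FAKE_ENABLE_MASK;
        fake_wrmsrl(config);
        return 0;
    }
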
@@ -318,18 +334,17 @@ static void perf_ibs_start(struct perf_event *event, int flags)
 	struct hw_perf_event *hwc = &event->hw;
 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
-	u64 config;
+	u64 period;
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
 		return;
 
 	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 	hwc->state = 0;
 
-	perf_ibs_set_period(perf_ibs, hwc, &config);
-	config = (config >> 4) | perf_ibs->enable_mask;
+	perf_ibs_set_period(perf_ibs, hwc, &period);
 	set_bit(IBS_STARTED, pcpu->state);
-	perf_ibs_enable_event(hwc, config);
+	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
 
 	perf_event_update_userpage(event);
 }
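
Two things change in perf_ibs_start(): the enable bit is no longer folded into the value by the caller, since perf_ibs_enable_event() now ORs in perf_ibs->enable_mask itself, and the period is shifted right by four before it reaches the max-count field. The shift reflects the IBS count granularity: the hardware drops the low four bits, so periods are effectively multiples of 16. A small sketch of that encoding (the helper names are made up for illustration):

    #include <stdint.h>
    #include <assert.h>

    /* The IBS max-count field omits the low 4 bits of the count, so the
     * hardware granularity is 16. */
    static uint64_t ibs_encode_period(uint64_t period)
    {
        return period >> 4;         /* value placed in the config's count field */
    }

    static uint64_t ibs_effective_period(uint64_t field)
    {
        return field << 4;          /* what the hardware will actually count */
    }

    int main(void)
    {
        /* A requested period of 100003 silently becomes 100000. */
        assert(ibs_effective_period(ibs_encode_period(100003)) == 100000);
        return 0;
    }
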
@@ -339,28 +354,27 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
 	struct hw_perf_event *hwc = &event->hw;
 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
-	u64 val;
+	u64 config;
 	int stopping;
 
 	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);
 
 	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
 		return;
 
-	rdmsrl(hwc->config_base, val);
+	rdmsrl(hwc->config_base, config);
 
 	if (stopping) {
 		set_bit(IBS_STOPPING, pcpu->state);
-		val &= ~perf_ibs->enable_mask;
-		wrmsrl(hwc->config_base, val);
+		perf_ibs_disable_event(perf_ibs, hwc, config);
 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 		hwc->state |= PERF_HES_STOPPED;
 	}
 
 	if (hwc->state & PERF_HES_UPTODATE)
 		return;
 
-	perf_ibs_event_update(perf_ibs, event, val);
+	perf_ibs_event_update(perf_ibs, event, &config);
 	hwc->state |= PERF_HES_UPTODATE;
 }
 
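perf_ibs_stop() now hands perf_ibs_event_update() a pointer rather than a value. That matters because of the retry loop shown in the first hunk: when perf_event_try_update() fails, the loop re-reads the MSR through that pointer, so the caller is left holding the freshest register contents instead of a stale by-value copy. The shape of that write-through retry pattern, reduced to plain C with stand-ins for the MSR read and the racing update:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    static volatile uint64_t hw_reg = 42;   /* models the IBS control MSR */

    static void read_reg(uint64_t *out)     /* models rdmsrl() */
    {
        *out = hw_reg;
    }

    static bool try_update(uint64_t count)  /* pretend the first try races */
    {
        static int attempts;
        return ++attempts >= 2;
    }

    /* Same shape as perf_ibs_event_update(): re-reads go through the
     * caller's variable, so the caller sees the refreshed value too. */
    static void event_update(uint64_t *config)
    {
        uint64_t count = *config;

        while (!try_update(count)) {
            read_reg(config);
            count = *config;
        }
    }

    int main(void)
    {
        uint64_t config = 0;                /* stale initial snapshot */

        event_update(&config);
        printf("caller's config: %llu\n", (unsigned long long)config); /* 42 */
        return 0;
    }
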
@@ -456,7 +470,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	struct perf_ibs_data ibs_data;
 	int offset, size, check_rip, offset_max, throttle = 0;
 	unsigned int msr;
-	u64 *buf, config;
+	u64 *buf, *config, period;
 
 	if (!test_bit(IBS_STARTED, pcpu->state)) {
 		/* Catch spurious interrupts after stopping IBS: */
@@ -477,15 +491,15 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	 * supported in all cpus. As this triggered an interrupt, we
 	 * set the current count to the max count.
 	 */
-	config = ibs_data.regs[0];
+	config = &ibs_data.regs[0];
 	if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
-		config &= ~IBS_OP_CUR_CNT;
-		config |= (config & IBS_OP_MAX_CNT) << 36;
+		*config &= ~IBS_OP_CUR_CNT;
+		*config |= (*config & IBS_OP_MAX_CNT) << 36;
 	}
 
 	perf_ibs_event_update(perf_ibs, event, config);
 	perf_sample_data_init(&data, 0, hwc->last_period);
-	if (!perf_ibs_set_period(perf_ibs, hwc, &config))
+	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
 		goto out;	/* no sw counter overflow */
 
 	ibs_data.caps = ibs_caps;
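
On CPUs without IBS_CAPS_RDWROPCNT the current-count field cannot be read back reliably, so the handler substitutes the programmed max count, shifted into the current-count bit position. Since config is now a pointer into ibs_data.regs[], the fixup also lands in the raw sample data rather than in a dead local copy. A sketch of the bit surgery, using hypothetical mask values in place of IBS_OP_CUR_CNT and IBS_OP_MAX_CNT:

    #include <stdint.h>
    #include <assert.h>

    /* Hypothetical stand-ins for the kernel's IBS_OP_* layout. */
    #define OP_MAX_CNT 0x000000000000ffffULL    /* max count, bits 15:0 */
    #define OP_CUR_CNT 0x000fffff00000000ULL    /* current count, bits 51:32 */

    /* Overwrite the unreadable current count with the programmed max count,
     * as the handler does when the interrupt itself proves the counter
     * reached its maximum. */
    static void fixup_cur_cnt(uint64_t *config)
    {
        *config &= ~OP_CUR_CNT;
        *config |= (*config & OP_MAX_CNT) << 36;
    }

    int main(void)
    {
        uint64_t regs0 = 0x00000bad00001000ULL; /* garbage CurCnt, MaxCnt 0x1000 */

        fixup_cur_cnt(&regs0);
        assert(((regs0 & OP_CUR_CNT) >> 36) == 0x1000);
        return 0;
    }
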
@@ -523,8 +537,10 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
-	config = (config >> 4) | (throttle ? 0 : perf_ibs->enable_mask);
-	perf_ibs_enable_event(hwc, config);
+	if (throttle)
+		perf_ibs_disable_event(perf_ibs, hwc, *config);
+	else
+		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
 
 	perf_event_update_userpage(event);
 
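
The rearm logic at the end of the handler now goes through the same helpers: a throttled event is torn down with the erratum-safe two-write sequence, everything else is re-armed with the freshly computed period, again pre-shifted by four. A compact sketch of that decision, with trivial stand-ins for the enable and disable helpers:

    #include <stdint.h>
    #include <stdio.h>

    static void enable_event(uint64_t count_field)
    {
        printf("re-arm, count field %#llx\n", (unsigned long long)count_field);
    }

    static void disable_event(void)
    {
        printf("throttled: erratum-safe teardown\n");
    }

    /* Mirrors the 'out:' path of the interrupt handler. */
    static void finish_sample(int throttle, uint64_t period)
    {
        if (throttle)
            disable_event();
        else
            enable_event(period >> 4);  /* period in units of 16 */
    }

    int main(void)
    {
        finish_sample(0, 0x4000);   /* normal case: re-arm with 0x400 */
        finish_sample(1, 0x4000);   /* throttled: disable instead */
        return 0;
    }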