/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "../port/error.h"
#include "cpu_buffer.h"
#include <oprofile.h>

#define OP_BUFFER_FLAGS 0

int num_cpus = 8;	// FIXME -- where do we get this.
/* we allocate an array of these and set the pointer in mach */
struct oprofile_cpu_buffer *op_cpu_buffer;
/* this one queue is used by #K to get all events. */
static Queue *opq;
/* this is run from core 0 for all cpu buffers. */
//static void wq_sync_buffer(void);
unsigned long oprofile_cpu_buffer_size = 65536;
unsigned long oprofile_backtrace_depth = 16;

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
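
/*
 * Data path, as implemented below: op_cpu_buffer_write_reserve() packs
 * op_samples into the current per-CPU Block; full Blocks (and explicit
 * flushes) are pushed onto the single queue opq with qibwrite(), and
 * oprofread() hands the queued bytes to whoever reads the #K device.
 */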
/*
 * Resets the cpu buffer to a sane state.
 *
 * reset these to invalid values; the next sample collected will
 * populate the buffer with proper values to initialize the buffer
 */
static inline void op_cpu_buffer_reset(int cpu)
{
	//print_func_entry();
	/* note: the cpu argument is not used; the current core's buffer is reset */
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_proc = nil;
	//print_func_exit();
}
/* returns the remaining free size of data in the entry */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
	//print_func_entry();
	if (!entry->size) {
		//print_func_exit();
		return 0;
	}
	*entry->data = val;
	entry->size--;
	entry->data++;
	//print_func_exit();
	return entry->size;
}

/* returns the size of data in the entry */
static inline int op_cpu_buffer_get_size(struct op_entry *entry)
{
	//print_func_entry();
	//print_func_exit();
	return entry->size;
}

/* returns 0 if empty or the size of data including the current value */
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
	//print_func_entry();
	int size = entry->size;

	if (!size) {
		//print_func_exit();
		return 0;
	}
	*val = *entry->data;
	entry->size--;
	entry->data++;
	//print_func_exit();
	return size;
}
unsigned long oprofile_get_cpu_buffer_size(void)
{
	//print_func_entry();
	//print_func_exit();
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	cpu_buf->sample_lost_overflow++;
	//print_func_exit();
}

void free_cpu_buffers(void)
{
	//print_func_entry();
	free(op_cpu_buffer);
	/* clear the pointer so a later alloc_cpu_buffers() reallocates
	 * instead of reusing freed memory */
	op_cpu_buffer = nil;
	/* we can just leave the queue set up; it will then always return EOF */
	//print_func_exit();
}
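
/*
 * Idempotent setup: create the global queue opq (non-blocking, so writers
 * drop data rather than stall) and the per-CPU buffer array on the first
 * call; later calls leave existing state alone.
 */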
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	//print_func_entry();
	/* should probably start using waserror() here. The fail stuff just gets
	 * ugly.
	 */
	int i;
	unsigned long buffer_size = oprofile_cpu_buffer_size;

	/* this can get called lots of times. Things might have been freed.
	 * So be careful.
	 */
	/* what limit? No idea. */
	if (!opq)
		opq = qopen(1024, 0, nil, nil);
	if (!opq)
		goto fail;
	/* we *really* don't want to block. Losing data is better. */
	qnoblock(opq, 1);
	if (!op_cpu_buffer) {
		op_cpu_buffer = smalloc(sizeof(*op_cpu_buffer) * num_cpus);
		if (!op_cpu_buffer)
			goto fail;
		for (i = 0; i < num_cpus; i++) {
			struct oprofile_cpu_buffer *b = &op_cpu_buffer[i];
			b->last_proc = nil;
			b->last_is_kernel = -1;
			b->tracing = 0;
			b->buffer_size = buffer_size;
			b->sample_received = 0;
			b->sample_lost_overflow = 0;
			b->backtrace_aborted = 0;
			b->sample_invalid_eip = 0;
			b->cpu = i;
			b->fullqueue = qopen(1024, Qmsg, nil, nil);
			b->emptyqueue = qopen(1024, Qmsg, nil, nil);
		}
	}
	//print_func_exit();
	return 0;

fail:
	free_cpu_buffers();
	//print_func_exit();
	panic("alloc_cpu_buffers");
	return -1;
}
void start_cpu_work(void)
{
	//print_func_entry();
	work_enabled = 1;
	//print_func_exit();
}

void end_cpu_work(void)
{
	//print_func_entry();
	work_enabled = 0;
	//print_func_exit();
}

/* placeholder. Not used yet. */
void flush_cpu_work(void)
{
	//print_func_entry();
	//struct oprofile_cpu_buffer *b = &op_cpu_buffer[machp()->machno];
	//print_func_exit();
}

/* Not used since we're not doing per-cpu buffering yet. */
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	//print_func_entry();
	//print_func_exit();
	return nil;
}
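
/*
 * Reserve room in the current per-CPU Block for one op_sample followed by
 * size data words.  On return, entry->sample points at the reserved sample,
 * entry->data at its data array, and entry->size is the number of data
 * words still available to op_cpu_buffer_add_data().  A full (or missing)
 * Block is shipped to opq and replaced via iallocb().
 */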
static Block *op_cpu_buffer_write_reserve(struct oprofile_cpu_buffer *cpu_buf,
					  struct op_entry *entry, int size)
{
	//print_func_entry();
	// Block *b; this gets some bizarre gcc set but not used error.
	int totalsize = sizeof(struct op_sample) +
		size * sizeof(entry->sample->data[0]);
	Block *b = cpu_buf->block;

	/* we might have run out; check against the full record size,
	 * since that is what we are about to write. */
	if (!b || (b->lim - b->wp) < totalsize) {
		if (b)
			qibwrite(opq, b);
		/* For now. Later, we will grab a block off the
		 * emptyblock queue.
		 */
		cpu_buf->block = b = iallocb(oprofile_cpu_buffer_size);
		if (!b) {
			print("%s: fail\n", __func__);
			//print_func_exit();
			return nil;
		}
	}
	entry->sample = (void *)b->wp;
	entry->size = size;
	entry->data = entry->sample->data;
	b->wp += totalsize;
	//print_func_exit();
	return b;
}
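
/*
 * Record a context change as an ESCAPE_CODE sample: the event word carries
 * the flag bits (kernel/user transition, process switch), and a
 * USER_CTX_SWITCH record carries the new Proc pointer as one data word.
 * Returns 0 on success, 1 on failure.
 */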
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, Proc *proc)
{
	Proc *up = externup();
	//print_func_entry();
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;
	struct op_entry entry;
	unsigned long flags;
	int size;

	flags = 0;
	if (waserror()) {
		poperror();
		print("%s: failed\n", __func__);
		//print_func_exit();
		return 1;
	}
	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a proc switch */
	if (cpu_buf->last_proc != proc) {
		cpu_buf->last_proc = proc;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags) {
		poperror();
		/* nothing to do */
		//print_func_exit();
		return 0;
	}

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, size)) {
		/* no room and no fresh Block; report failure */
		poperror();
		//print_func_exit();
		return 1;
	}

	entry.sample->eip = ESCAPE_CODE;
	entry.sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)proc);

	poperror();
	//print_func_exit();
	return 0;
}
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	Proc *up = externup();
	//print_func_entry();
	struct op_entry entry;
	struct op_sample *sample;
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;

	if (waserror()) {
		poperror();
		print("%s: failed\n", __func__);
		//print_func_exit();
		return 1;
	}

	if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, 0)) {
		/* no room for the sample; report failure */
		poperror();
		//print_func_exit();
		return 1;
	}

	sample = entry.sample;
	sample->eip = pc;
	sample->event = event;

	poperror();
	//print_func_exit();
	return 0;
}
/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event,
	   Proc *proc)
{
	//print_func_entry();
	Proc *tsk = proc ? proc : externup();

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		//print_func_exit();
		return 0;
	}

	/* ah, so great. op_add* return 1 in event of failure.
	 * this function returns 0 in event of failure.
	 * what a cluster.
	 */
	lock(&cpu_buf->lock);
	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;
	if (op_add_sample(cpu_buf, pc, event))
		goto fail;
	unlock(&cpu_buf->lock);
	//print_func_exit();
	return 1;

fail:
	/* drop the buffer lock on the failure path as well */
	unlock(&cpu_buf->lock);
	cpu_buf->sample_lost_overflow++;
	//print_func_exit();
	return 0;
}
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	//print_func_entry();
	cpu_buf->tracing = 1;
	//print_func_exit();
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	//print_func_entry();
	cpu_buf->tracing = 0;
	//print_func_exit();
}
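
/*
 * Push the core's current Block (if any) out to opq so a reader can see it.
 * If newbuf is set, a fresh Block is allocated for further samples;
 * otherwise the core is left without a Block.
 */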
void oprofile_cpubuf_flushone(int core, int newbuf)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf;

	cpu_buf = &op_cpu_buffer[core];
	lock(&cpu_buf->lock);
	if (cpu_buf->block) {
		print("Core %d has data\n", core);
		qibwrite(opq, cpu_buf->block);
		print("After qibwrite in %s, opq len %d\n", __func__, qlen(opq));
	}
	if (newbuf)
		cpu_buf->block = iallocb(oprofile_cpu_buffer_size);
	else
		cpu_buf->block = nil;
	unlock(&cpu_buf->lock);
	//print_func_exit();
}

void oprofile_cpubuf_flushall(int alloc)
{
	//print_func_entry();
	int core;

	for (core = 0; core < num_cpus; core++) {
		oprofile_cpubuf_flushone(core, alloc);
	}
	//print_func_exit();
}
void oprofile_control_trace(int onoff)
{
	//print_func_entry();
	int core;
	struct oprofile_cpu_buffer *cpu_buf;

	for (core = 0; core < num_cpus; core++) {
		cpu_buf = &op_cpu_buffer[core];
		cpu_buf->tracing = onoff;
		if (onoff) {
			print("Enable tracing on %d\n", core);
			continue;
		}
		/* halting. Force out all buffers. */
		oprofile_cpubuf_flushone(core, 0);
	}
	//print_func_exit();
}
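
/*
 * Log one sample on the current core's buffer.  regs is accepted for API
 * compatibility, but the backtrace path that would consume it is currently
 * compiled out (#if 0 below).
 */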
static inline void
__oprofile_add_ext_sample(unsigned long pc,
			  void /*struct pt_regs */ *const regs,
			  unsigned long event, int is_kernel, Proc *proc)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, proc)) {
		/* failed */
		//print_func_exit();
		return;
	}
	if (!backtrace) {
		//print_func_exit();
		return;
	}
#if 0
	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
#endif
	//print_func_exit();
}
void oprofile_add_ext_hw_sample(unsigned long pc,
				Ureg *regs,
				unsigned long event, int is_kernel,
				Proc *proc)
{
	//print_func_entry();
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, proc);
	//print_func_exit();
}

void oprofile_add_ext_sample(unsigned long pc,
			     void /*struct pt_regs */ *const regs,
			     unsigned long event, int is_kernel)
{
	//print_func_entry();
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, nil);
	//print_func_exit();
}

void oprofile_add_sample(void /*struct pt_regs */ *const regs,
			 unsigned long event)
{
	//print_func_entry();
	int is_kernel;
	unsigned long pc;

	if (regs) {
		is_kernel = 0;	// FIXME!user_mode(regs);
		pc = 0;		// FIXME profile_pc(regs);
	} else {
		is_kernel = 0;		/* This value will not be used */
		pc = ESCAPE_CODE;	/* as this causes an early return. */
	}
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, nil);
	//print_func_exit();
}
/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry,
		       Ureg *regs,
		       unsigned long pc, int code, int size)
{
	Proc *up = externup();
	//print_func_entry();
	struct op_sample *sample;
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;
	int is_kernel = 0;	// FIXME!user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (waserror()) {
		print("%s: failed\n", __func__);
		poperror();
		goto fail;
	}
	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, externup())) {
		/* pop the error label before taking the failure exit */
		poperror();
		goto fail;
	}

	if (!op_cpu_buffer_write_reserve(cpu_buf, entry, size + 2)) {
		poperror();
		goto fail;
	}
	sample = entry->sample;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;	/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	poperror();
	//print_func_exit();
	return;

fail:
	entry->event = nil;
	cpu_buf->sample_lost_overflow++;
	//print_func_exit();
}
int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	//print_func_entry();
	if (!entry->event) {
		//print_func_exit();
		return 0;
	}
	//print_func_exit();
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, uint64_t val)
{
	//print_func_entry();
	if (!entry->event) {
		//print_func_exit();
		return 0;
	}
	if (op_cpu_buffer_get_size(entry) < 2) {
		/*
		 * the function returns 0 to indicate a too small
		 * buffer, even if there is some space left
		 */
		//print_func_exit();
		return 0;
	}
	if (!op_cpu_buffer_add_data(entry, (uint32_t)val)) {
		//print_func_exit();
		return 0;
	}
	//print_func_exit();
	return op_cpu_buffer_add_data(entry, (uint32_t)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	//print_func_entry();
	/* not much to do at present. In future, we might write the Block
	 * to opq.
	 */
	//print_func_exit();
	return 0;
}
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	log_sample(cpu_buf, pc, 0, is_kernel, event, nil);
	//print_func_exit();
}
void oprofile_add_trace(unsigned long pc)
{
	if (!op_cpu_buffer)
		return;
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (!cpu_buf->tracing) {
		//print_func_exit();
		return;
	}

	/*
	 * broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, fastticks2ns(rdtsc())))
		goto fail;

	//print_func_exit();
	return;
fail:
	print("%s: fail. Turning off tracing on cpu %d\n",
	      __func__, machp()->machno);
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	//print_func_exit();
	return;
}
/* Format for samples:
 * first word:
 * high 8 bits is ee, which is an invalid address on amd64.
 * next 8 bits is protocol version
 * next 16 bits is unused, MBZ. Later, we can make it a packet type.
 * next 16 bits is core id
 * next 8 bits is unused
 * next 8 bits is # PCs following. This should be at least 1, for one EIP.
 *
 * second word is time in ns.
 *
 * Third and following words are PCs, there must be at least one of them.
 */
void oprofile_add_backtrace(uintptr_t pc, uintptr_t fp)
{
	/* version 1. */
	uint64_t descriptor = 0xee01ULL << 48;

	if (!op_cpu_buffer)
		return;
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (!cpu_buf->tracing) {
		//print_func_exit();
		return;
	}

	struct op_entry entry;
	struct op_sample *sample;
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;
	uint64_t event = fastticks2ns(rdtsc());

	uintptr_t bt_pcs[oprofile_backtrace_depth];
	int nr_pcs;

	nr_pcs = backtrace_list(pc, fp, bt_pcs, oprofile_backtrace_depth);

	/* write_reserve always assumes passed-in-size + 2.
	 * backtrace_depth should always be > 0.
	 */
	if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, nr_pcs))
		return;

	/* we are changing the sample format, but not the struct
	 * member names yet. Later, assuming this works out.
	 */
	descriptor |= (machp()->machno << 16) | nr_pcs;

	sample = entry.sample;
	sample->eip = descriptor;
	sample->event = event;
	memmove(sample->data, bt_pcs, sizeof(uintptr_t) * nr_pcs);
	//print_func_exit();
	return;
}
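
/*
 * Emit a single user-space PC using the same descriptor format as
 * oprofile_add_backtrace() above: 0xee01 tag, core id, and a PC count of 1.
 */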
void oprofile_add_userpc(uintptr_t pc)
{
	struct oprofile_cpu_buffer *cpu_buf;
	uint32_t pcoreid = machp()->machno;
	struct op_entry entry;
	// Block *b; this gets some bizarre gcc set but not used error.
	uint64_t descriptor = (0xee01ULL << 48) | (pcoreid << 16) | 1;

	if (!op_cpu_buffer)
		return;
	cpu_buf = &op_cpu_buffer[pcoreid];
	if (!cpu_buf->tracing)
		return;

	/* write_reserve always assumes passed-in-size + 2. need room for 1 PC. */
	Block *b = op_cpu_buffer_write_reserve(cpu_buf, &entry, 1);
	if (!b)
		return;
	entry.sample->eip = descriptor;
	entry.sample->event = fastticks2ns(rdtsc());
	/* entry.sample->data == entry.data */
	assert(entry.sample->data == entry.data);
	*entry.sample->data = pc;
}
int
oproflen(void)
{
	return qlen(opq);
}

/* return # bytes read, or 0 if profiling is off, or block if profiling is
 * on and there is no data yet.
 */
int
oprofread(void *va, int n)
{
	int len = qlen(opq);
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (len == 0) {
		if (cpu_buf->tracing == 0)
			return 0;
	}
	len = qread(opq, va, n);
	return len;
}