/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "../port/error.h"
#include "cpu_buffer.h"
#include <oprofile.h>

#define OP_BUFFER_FLAGS 0

int num_cpus = 8;	// FIXME -- where do we get this.

/* we allocate an array of these and set the pointer in mach */
struct oprofile_cpu_buffer *op_cpu_buffer;

/* this one queue is used by #K to get all events. */
static Queue *opq;

/* this is run from core 0 for all cpu buffers. */
//static void wq_sync_buffer(void);

unsigned long oprofile_cpu_buffer_size = 65536;
unsigned long oprofile_backtrace_depth = 16;

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
/*
 * Resets the cpu buffer to a sane state.
 *
 * reset these to invalid values; the next sample collected will
 * populate the buffer with proper values to initialize the buffer
 */
static inline void op_cpu_buffer_reset(int cpu)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_proc = nil;
	//print_func_exit();
}
/* returns the remaining free size of data in the entry */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
	//print_func_entry();
	assert(entry->size >= 0);
	if (!entry->size) {
		//print_func_exit();
		return 0;
	}
	*entry->data = val;
	entry->size--;
	entry->data++;
	//print_func_exit();
	return entry->size;
}

/* returns the size of data in the entry */
static inline int op_cpu_buffer_get_size(struct op_entry *entry)
{
	//print_func_entry();
	//print_func_exit();
	return entry->size;
}

/* returns 0 if empty or the size of data including the current value */
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
	//print_func_entry();
	int size = entry->size;

	if (!size) {
		//print_func_exit();
		return 0;
	}
	*val = *entry->data;
	entry->size--;
	entry->data++;
	//print_func_exit();
	return size;
}
unsigned long oprofile_get_cpu_buffer_size(void)
{
	//print_func_entry();
	//print_func_exit();
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	cpu_buf->sample_lost_overflow++;
	//print_func_exit();
}
void free_cpu_buffers(void)
{
	//print_func_entry();
	free(op_cpu_buffer);
	/* clear the pointer so alloc_cpu_buffers() knows to reallocate
	 * and nobody dereferences the freed array. */
	op_cpu_buffer = nil;
	/* we can just leave the queue set up; it will then always return EOF */
	//print_func_exit();
}
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	//print_func_entry();
	/* should probably start using waserror() here. The fail stuff just gets
	 * ugly.
	 */
	int i;
	unsigned long buffer_size = oprofile_cpu_buffer_size;

	/* this can get called lots of times. Things might have been freed.
	 * So be careful.
	 */
	/* what limit? No idea. */
	if (!opq)
		opq = qopen(1024, 0, nil, nil);
	if (!opq)
		goto fail;
	/* we *really* don't want to block. Losing data is better. */
	qnoblock(opq, 1);
	if (!op_cpu_buffer) {
		op_cpu_buffer = smalloc(sizeof(*op_cpu_buffer) * num_cpus);
		if (!op_cpu_buffer)
			goto fail;
		for (i = 0; i < num_cpus; i++) {
			struct oprofile_cpu_buffer *b = &op_cpu_buffer[i];
			b->last_proc = nil;
			b->last_is_kernel = -1;
			b->tracing = 0;
			b->buffer_size = buffer_size;
			b->sample_received = 0;
			b->sample_lost_overflow = 0;
			b->backtrace_aborted = 0;
			b->sample_invalid_eip = 0;
			b->cpu = i;
			b->fullqueue = qopen(1024, Qmsg, nil, nil);
			b->emptyqueue = qopen(1024, Qmsg, nil, nil);
		}
	}
	//print_func_exit();
	return 0;

fail:
	free_cpu_buffers();
	//print_func_exit();
	panic("alloc_cpu_buffers");
	return -1;
}
void start_cpu_work(void)
{
	//print_func_entry();
	work_enabled = 1;
	//print_func_exit();
}

void end_cpu_work(void)
{
	//print_func_entry();
	work_enabled = 0;
	//print_func_exit();
}

/* placeholder. Not used yet. */
void flush_cpu_work(void)
{
	//print_func_entry();
	//struct oprofile_cpu_buffer *b = &op_cpu_buffer[machp()->machno];
	//print_func_exit();
}

/* Not used since we're not doing per-cpu buffering yet. */
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	//print_func_entry();
	//print_func_exit();
	return nil;
}
static Block *op_cpu_buffer_write_reserve(struct oprofile_cpu_buffer *cpu_buf,
					  struct op_entry *entry, int size)
{
	//print_func_entry();
	// Block *b; this gets some bizarre gcc set but not used error.
	int totalsize = sizeof(struct op_sample) +
		size * sizeof(entry->sample->data[0]);
	Block *b = cpu_buf->block;

	/* we might have run out. Check against totalsize, since that is
	 * what wp is advanced by below.
	 */
	if (!b || (b->lim - b->wp) < totalsize) {
		if (b)
			qibwrite(opq, b);
		/* For now. Later, we will grab a block off the
		 * emptyblock queue.
		 */
		cpu_buf->block = b = iallocb(oprofile_cpu_buffer_size);
		if (!b) {
			print("%s: fail\n", __func__);
			//print_func_exit();
			return nil;
		}
	}
	entry->sample = (void *)b->wp;
	entry->size = size;
	entry->data = entry->sample->data;
	b->wp += totalsize;
	//print_func_exit();
	return b;
}
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, Proc *proc)
{
	Mach *m = machp();
	//print_func_entry();
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;
	struct op_entry entry;
	unsigned long flags;
	int size;

	flags = 0;
	if (waserror()) {
		poperror();
		print("%s: failed\n", __func__);
		//print_func_exit();
		return 1;
	}
	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a proc switch */
	if (cpu_buf->last_proc != proc) {
		cpu_buf->last_proc = proc;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags) {
		poperror();
		/* nothing to do */
		//print_func_exit();
		return 0;
	}

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, size)) {
		/* no room in the current Block; drop this record */
		poperror();
		//print_func_exit();
		return 1;
	}
	entry.sample->eip = ESCAPE_CODE;
	entry.sample->event = flags;
	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)proc);
	poperror();
	//print_func_exit();
	return 0;
}
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	Mach *m = machp();
	//print_func_entry();
	struct op_entry entry;
	struct op_sample *sample;
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;

	if (waserror()) {
		poperror();
		print("%s: failed\n", __func__);
		//print_func_exit();
		return 1;
	}
	if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, 0)) {
		/* no room in the current Block; drop this sample */
		poperror();
		//print_func_exit();
		return 1;
	}
	sample = entry.sample;
	sample->eip = pc;
	sample->event = event;
	poperror();
	//print_func_exit();
	return 0;
}
/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event,
	   Proc *proc)
{
	//print_func_entry();
	Proc *tsk = proc ? proc : machp()->externup;

	cpu_buf->sample_received++;
	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		//print_func_exit();
		return 0;
	}
	/* ah, so great. op_add* return 1 in event of failure.
	 * this function returns 0 in event of failure.
	 * what a cluster.
	 */
	lock(&cpu_buf->lock);
	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
		goto fail;
	if (op_add_sample(cpu_buf, pc, event))
		goto fail;
	unlock(&cpu_buf->lock);
	//print_func_exit();
	return 1;

fail:
	/* drop the lock taken above before bailing out */
	unlock(&cpu_buf->lock);
	cpu_buf->sample_lost_overflow++;
	//print_func_exit();
	return 0;
}
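
/*
 * A sketch of what one successful log_sample() call appends to the
 * current Block (this only summarizes the code above, it is not an
 * extra code path): if the kernel/user mode or the proc changed since
 * the last sample, op_add_code() first emits a marker sample
 *
 *	{ .eip = ESCAPE_CODE, .event = flags }
 *
 * followed by one data word holding the Proc pointer when flags contain
 * USER_CTX_SWITCH; op_add_sample() then emits the real sample
 *
 *	{ .eip = pc, .event = event }
 *
 * The flag bits (IS_KERNEL, KERNEL_CTX_SWITCH, USER_CTX_SWITCH,
 * TRACE_BEGIN) come from the oprofile headers.
 */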
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	//print_func_entry();
	cpu_buf->tracing = 1;
	//print_func_exit();
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	//print_func_entry();
	cpu_buf->tracing = 0;
	//print_func_exit();
}
void oprofile_cpubuf_flushone(int core, int newbuf)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf;

	cpu_buf = &op_cpu_buffer[core];
	lock(&cpu_buf->lock);
	if (cpu_buf->block) {
		print("Core %d has data\n", core);
		qibwrite(opq, cpu_buf->block);
		print("After qibwrite in %s, opq len %d\n", __func__, qlen(opq));
	}
	if (newbuf)
		cpu_buf->block = iallocb(oprofile_cpu_buffer_size);
	else
		cpu_buf->block = nil;
	unlock(&cpu_buf->lock);
	//print_func_exit();
}

void oprofile_cpubuf_flushall(int alloc)
{
	//print_func_entry();
	int core;

	for (core = 0; core < num_cpus; core++) {
		oprofile_cpubuf_flushone(core, alloc);
	}
	//print_func_exit();
}
void oprofile_control_trace(int onoff)
{
	//print_func_entry();
	int core;
	struct oprofile_cpu_buffer *cpu_buf;

	for (core = 0; core < num_cpus; core++) {
		cpu_buf = &op_cpu_buffer[core];
		cpu_buf->tracing = onoff;
		if (onoff) {
			print("Enable tracing on %d\n", core);
			continue;
		}
		/* halting. Force out all buffers. */
		oprofile_cpubuf_flushone(core, 0);
	}
	//print_func_exit();
}
static inline void
__oprofile_add_ext_sample(unsigned long pc,
			  void /*struct pt_regs */ *const regs,
			  unsigned long event, int is_kernel, Proc *proc)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, proc)) {
		/* failed */
		//print_func_exit();
		return;
	}
	if (!backtrace) {
		//print_func_exit();
		return;
	}
#if 0
	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
#endif
	//print_func_exit();
}
void oprofile_add_ext_hw_sample(unsigned long pc,
				Ureg *regs,
				unsigned long event, int is_kernel,
				Proc *proc)
{
	//print_func_entry();
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, proc);
	//print_func_exit();
}

void oprofile_add_ext_sample(unsigned long pc,
			     void /*struct pt_regs */ *const regs,
			     unsigned long event, int is_kernel)
{
	//print_func_entry();
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, nil);
	//print_func_exit();
}

void oprofile_add_sample(void /*struct pt_regs */ *const regs,
			 unsigned long event)
{
	//print_func_entry();
	int is_kernel;
	unsigned long pc;

	if (regs) {
		is_kernel = 0;	// FIXME !user_mode(regs);
		pc = 0;		// FIXME profile_pc(regs);
	} else {
		is_kernel = 0;		/* This value will not be used */
		pc = ESCAPE_CODE;	/* as this causes an early return. */
	}
	__oprofile_add_ext_sample(pc, regs, event, is_kernel, nil);
	//print_func_exit();
}
/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry,
		       Ureg *regs,
		       unsigned long pc, int code, int size)
{
	Mach *m = machp();
	//print_func_entry();
	struct op_sample *sample;
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;
	int is_kernel = 0;	// FIXME !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (waserror()) {
		print("%s: failed\n", __func__);
		poperror();
		goto fail;
	}
	cpu_buf->sample_received++;
	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, machp()->externup))
		goto fail;
	if (!op_cpu_buffer_write_reserve(cpu_buf, entry, size + 2))
		goto fail;
	sample = entry->sample;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;	/* no flags */
	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);
	poperror();
	//print_func_exit();
	return;

fail:
	entry->event = nil;
	cpu_buf->sample_lost_overflow++;
	//print_func_exit();
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	//print_func_entry();
	if (!entry->event) {
		//print_func_exit();
		return 0;
	}
	//print_func_exit();
	return op_cpu_buffer_add_data(entry, val);
}
int oprofile_add_data64(struct op_entry *entry, uint64_t val)
{
	//print_func_entry();
	if (!entry->event) {
		//print_func_exit();
		return 0;
	}
	if (op_cpu_buffer_get_size(entry) < 2) {
		/*
		 * the function returns 0 to indicate a too small
		 * buffer, even if there is some space left
		 */
		//print_func_exit();
		return 0;
	}
	if (!op_cpu_buffer_add_data(entry, (uint32_t) val)) {
		//print_func_exit();
		return 0;
	}
	//print_func_exit();
	return op_cpu_buffer_add_data(entry, (uint32_t) (val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	//print_func_entry();
	/* not much to do at present. In future, we might write the Block
	 * to opq.
	 */
	//print_func_exit();
	return 0;
}
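
/*
 * A minimal usage sketch for the reserve/add/commit path above (the
 * caller and the values are illustrative, not taken from real code):
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, code, 2);
 *	oprofile_add_data(&entry, val0);
 *	oprofile_add_data(&entry, val1);
 *	oprofile_write_commit(&entry);
 *
 * The fail path in oprofile_write_reserve() clears entry->event, and
 * oprofile_add_data()/oprofile_add_data64() refuse to write when it is
 * nil, so the adds after a failed reservation become no-ops.
 */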
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	log_sample(cpu_buf, pc, 0, is_kernel, event, nil);
	//print_func_exit();
}
void oprofile_add_trace(unsigned long pc)
{
	if (!op_cpu_buffer)
		return;
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (!cpu_buf->tracing) {
		//print_func_exit();
		return;
	}
	/*
	 * broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;
	if (op_add_sample(cpu_buf, pc, fastticks2ns(rdtsc())))
		goto fail;
	//print_func_exit();
	return;

fail:
	print("%s: fail. Turning off tracing on cpu %d\n", __func__, machp()->machno);
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	//print_func_exit();
	return;
}
/* Format for samples:
 * first word:
 *	high 8 bits is ee, which is an invalid address on amd64.
 *	next 8 bits is protocol version
 *	next 16 bits is unused, MBZ. Later, we can make it a packet type.
 *	next 16 bits is core id
 *	next 8 bits is unused
 *	next 8 bits is # PCs following. This should be at least 1, for one EIP.
 *
 * second word is time in ns.
 *
 * Third and following words are PCs, there must be at least one of them.
 */
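
/*
 * Worked example of the first word, using made-up values: a trace of 4
 * PCs captured on core 3 gives
 *
 *	(0xee01ULL << 48) | (3 << 16) | 4  ==  0xee01000000030004
 *
 * i.e. ee, version 01, core id 3, 4 PCs; it is followed by the ns
 * timestamp and then the 4 PC values.
 */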
void oprofile_add_backtrace(uintptr_t pc, uintptr_t fp)
{
	/* version 1. */
	uint64_t descriptor = 0xee01ULL << 48;

	if (!op_cpu_buffer)
		return;
	//print_func_entry();
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (!cpu_buf->tracing) {
		//print_func_exit();
		return;
	}

	struct op_entry entry;
	struct op_sample *sample;
	// Block *b; this gets some bizarre gcc set but not used error. Block *b;
	uint64_t event = fastticks2ns(rdtsc());

	uintptr_t bt_pcs[oprofile_backtrace_depth];
	int nr_pcs;

	nr_pcs = backtrace_list(pc, fp, bt_pcs, oprofile_backtrace_depth);
	/* write_reserve always assumes passed-in-size + 2.
	 * backtrace_depth should always be > 0.
	 */
	if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, nr_pcs))
		return;
	/* we are changing the sample format, but not the struct
	 * member names yet. Later, assuming this works out.
	 */
	descriptor |= (machp()->machno << 16) | nr_pcs;
	sample = entry.sample;
	sample->eip = descriptor;
	sample->event = event;
	memmove(sample->data, bt_pcs, sizeof(uintptr_t) * nr_pcs);
	//print_func_exit();
	return;
}
void oprofile_add_userpc(uintptr_t pc)
{
	struct oprofile_cpu_buffer *cpu_buf;
	uint32_t pcoreid = machp()->machno;
	struct op_entry entry;
	// Block *b; this gets some bizarre gcc set but not used error.
	uint64_t descriptor = (0xee01ULL << 48) | (pcoreid << 16) | 1;

	if (!op_cpu_buffer)
		return;
	cpu_buf = &op_cpu_buffer[pcoreid];
	if (!cpu_buf->tracing)
		return;
	/* write_reserve always assumes passed-in-size + 2. need room for 1 PC. */
	Block *b = op_cpu_buffer_write_reserve(cpu_buf, &entry, 1);
	if (!b)
		return;
	entry.sample->eip = descriptor;
	entry.sample->event = fastticks2ns(rdtsc());
	/* entry.sample->data == entry.data */
	assert(entry.sample->data == entry.data);
	*entry.sample->data = pc;
}
int
oproflen(void)
{
	return qlen(opq);
}

/* return # bytes read, or 0 if profiling is off, or block if profiling on and no data.
 */
int
oprofread(void *va, int n)
{
	int len = qlen(opq);
	struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

	if (len == 0) {
		if (cpu_buf->tracing == 0)
			return 0;
	}
	len = qread(opq, va, n);
	return len;
}