cpu_buffer.c

/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "../port/error.h"
#include "cpu_buffer.h"
#include <oprofile.h>

#define OP_BUFFER_FLAGS 0

int num_cpus = 1;    // Probably this many cpus (will get increased by squidboy if > 1)
/* we allocate an array of these and set the pointer in mach */
struct oprofile_cpu_buffer *op_cpu_buffer;
/* this one queue is used by #K to get all events. */
static Queue *opq;
/* this is run from core 0 for all cpu buffers. */
//static void wq_sync_buffer(void);
unsigned long oprofile_cpu_buffer_size = 65536;
unsigned long oprofile_backtrace_depth = 16;

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
/*
 * Resets the cpu buffer to a sane state.
 *
 * Reset these to invalid values; the next sample collected will
 * populate the buffer with proper values.
 */
// TODO. We hate #if 0. Sorry.
#if 0
static inline void op_cpu_buffer_reset(int cpu)
{
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

    cpu_buf->last_is_kernel = -1;
    cpu_buf->last_proc = nil;
    //print_func_exit();
}
#endif
/* returns the remaining free size of data in the entry */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
    //print_func_entry();
    if (!entry->size) {
        //print_func_exit();
        return 0;
    }
    *entry->data = val;
    entry->size--;
    entry->data++;
    //print_func_exit();
    return entry->size;
}

/* returns the size of data in the entry */
static inline int op_cpu_buffer_get_size(struct op_entry *entry)
{
    //print_func_entry();
    //print_func_exit();
    return entry->size;
}

/* returns 0 if empty or the size of data including the current value */
#if 0
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
    //print_func_entry();
    int size = entry->size;
    if (!size) {
        //print_func_exit();
        return 0;
    }
    *val = *entry->data;
    entry->size--;
    entry->data++;
    //print_func_exit();
    return size;
}
#endif

unsigned long oprofile_get_cpu_buffer_size(void)
{
    //print_func_entry();
    //print_func_exit();
    return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

    cpu_buf->sample_lost_overflow++;
    //print_func_exit();
}
void free_cpu_buffers(void)
{
    //print_func_entry();
    free(op_cpu_buffer);
    /* clear the pointer so a later alloc_cpu_buffers() starts fresh */
    op_cpu_buffer = nil;
    /* we can just leave the queue set up; it will then always return EOF */
    //print_func_exit();
}

#define RB_EVENT_HDR_SIZE 4
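
/* Allocate the global event queue and the per-cpu buffer array.
 * Safe to call more than once; existing allocations are reused.
 */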
int alloc_cpu_buffers(void)
{
    //print_func_entry();
    /* should probably start using waserror() here. The fail stuff just gets
     * ugly.
     */
    int i;
    unsigned long buffer_size = oprofile_cpu_buffer_size;

    /* this can get called lots of times. Things might have been freed.
     * So be careful.
     */
    /* what limit? No idea. */
    if (!opq)
        opq = qopen(1024, 0, nil, nil);
    if (!opq)
        goto fail;
    /* we *really* don't want to block. Losing data is better. */
    qnoblock(opq, 1);

    if (!op_cpu_buffer) {
        op_cpu_buffer = smalloc(sizeof(*op_cpu_buffer) * num_cpus);
        if (!op_cpu_buffer)
            goto fail;
        for (i = 0; i < num_cpus; i++) {
            struct oprofile_cpu_buffer *b = &op_cpu_buffer[i];
            b->last_proc = nil;
            b->last_is_kernel = -1;
            b->tracing = 0;
            b->buffer_size = buffer_size;
            b->sample_received = 0;
            b->sample_lost_overflow = 0;
            b->backtrace_aborted = 0;
            b->sample_invalid_eip = 0;
            b->cpu = i;
            b->fullqueue = qopen(1024, Qmsg, nil, nil);
            b->emptyqueue = qopen(1024, Qmsg, nil, nil);
        }
    }
    //print_func_exit();
    return 0;

fail:
    free_cpu_buffers();
    //print_func_exit();
    panic("alloc_cpu_buffers");
    return -1;
}
void start_cpu_work(void)
{
    //print_func_entry();
    work_enabled = 1;
    //print_func_exit();
}

void end_cpu_work(void)
{
    //print_func_entry();
    work_enabled = 0;
    //print_func_exit();
}

/* placeholder. Not used yet. */
void flush_cpu_work(void)
{
    //print_func_entry();
    //struct oprofile_cpu_buffer *b = &op_cpu_buffer[machp()->machno];
    //print_func_exit();
}

/* Not used since we're not doing per-cpu buffering yet. */
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
    //print_func_entry();
    //print_func_exit();
    return nil;
}
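
/* Reserve room for a sample plus 'size' extra data words in the current
 * per-cpu Block. If the Block is missing or too full, the old Block is
 * pushed onto opq and a fresh one is allocated. Returns the Block, or
 * nil if no Block could be allocated.
 */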
static Block *op_cpu_buffer_write_reserve(struct oprofile_cpu_buffer *cpu_buf,
                                          struct op_entry *entry, int size)
{
    //print_func_entry();
    // Block *b; this gets some bizarre gcc set but not used error.
    int totalsize = sizeof(struct op_sample) +
        size * sizeof(entry->sample->data[0]);

    if (totalsize > oprofile_cpu_buffer_size) {
        print("%s: totalsize %d, oprofile_cpu_buffer_size %lud\n",
              __func__, totalsize, oprofile_cpu_buffer_size);
        panic("fix oprofile");
    }

    Block *b = cpu_buf->block;
    /* we might have run out. */
    if (!b || b->lim - b->wp < totalsize) {
        if (b)
            qibwrite(opq, b);
        /* For now. Later, we will grab a block off the
         * emptyblock queue.
         */
        cpu_buf->block = b = iallocb(oprofile_cpu_buffer_size);
        if (!b) {
            print("%s: fail\n", __func__);
            //print_func_exit();
            return nil;
        }
    }
    entry->sample = (void *)b->wp;
    entry->size = size;
    entry->data = entry->sample->data;
    b->wp += totalsize;
    //print_func_exit();
    return b;
}
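
/* Emit an ESCAPE_CODE record describing kernel/user and proc context
 * switches since the last sample. Returns 0 on success, 1 on failure.
 */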
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
            int is_kernel, Proc *proc)
{
    Proc *up = externup();
    //print_func_entry();
    // Block *b; this gets some bizarre gcc set but not used error. Block *b;
    struct op_entry entry;
    unsigned long flags;
    int size;

    flags = 0;
    if (waserror()) {
        poperror();
        print("%s: failed\n", __func__);
        //print_func_exit();
        return 1;
    }

    if (backtrace)
        flags |= TRACE_BEGIN;

    /* notice a switch from user->kernel or vice versa */
    is_kernel = !!is_kernel;
    if (cpu_buf->last_is_kernel != is_kernel) {
        cpu_buf->last_is_kernel = is_kernel;
        flags |= KERNEL_CTX_SWITCH;
        if (is_kernel)
            flags |= IS_KERNEL;
    }

    /* notice a proc switch */
    if (cpu_buf->last_proc != proc) {
        cpu_buf->last_proc = proc;
        flags |= USER_CTX_SWITCH;
    }

    if (!flags) {
        poperror();
        /* nothing to do */
        //print_func_exit();
        return 0;
    }

    if (flags & USER_CTX_SWITCH)
        size = 1;
    else
        size = 0;

    if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, size)) {
        /* couldn't get a block; count it as a failure */
        poperror();
        //print_func_exit();
        return 1;
    }
    entry.sample->eip = ESCAPE_CODE;
    entry.sample->event = flags;

    if (size)
        op_cpu_buffer_add_data(&entry, (unsigned long)proc);

    poperror();
    //print_func_exit();
    return 0;
}
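
/* Append a single pc/event pair to the cpu buffer.
 * Returns 0 on success, 1 on failure.
 */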
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
              unsigned long pc, unsigned long event)
{
    Proc *up = externup();
    //print_func_entry();
    struct op_entry entry;
    struct op_sample *sample;
    // Block *b; this gets some bizarre gcc set but not used error. Block *b;

    if (waserror()) {
        poperror();
        print("%s: failed\n", __func__);
        //print_func_exit();
        return 1;
    }

    if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, 0)) {
        /* couldn't get a block; count it as a failure */
        poperror();
        //print_func_exit();
        return 1;
    }
    sample = entry.sample;
    sample->eip = pc;
    sample->event = event;

    poperror();
    //print_func_exit();
    return 0;
}
/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
           unsigned long backtrace, int is_kernel, unsigned long event,
           Proc *proc)
{
    //print_func_entry();
    Proc *tsk = proc ? proc : externup();
    cpu_buf->sample_received++;

    if (pc == ESCAPE_CODE) {
        cpu_buf->sample_invalid_eip++;
        //print_func_exit();
        return 0;
    }

    /* ah, so great. op_add* return 1 in event of failure.
     * this function returns 0 in event of failure.
     * what a cluster.
     */
    lock(&cpu_buf->lock);
    if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
        goto fail;

    if (op_add_sample(cpu_buf, pc, event))
        goto fail;

    unlock(&cpu_buf->lock);
    //print_func_exit();
    return 1;

fail:
    cpu_buf->sample_lost_overflow++;
    /* don't leave the buffer locked on the failure path */
    unlock(&cpu_buf->lock);
    //print_func_exit();
    return 0;
}
#if 0
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
    //print_func_entry();
    cpu_buf->tracing = 1;
    //print_func_exit();
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
    //print_func_entry();
    cpu_buf->tracing = 0;
    //print_func_exit();
}
#endif
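
/* Push one core's partially filled Block onto opq; optionally hand the
 * core a fresh empty Block.
 */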
void oprofile_cpubuf_flushone(int core, int newbuf)
{
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf;

    cpu_buf = &op_cpu_buffer[core];
    lock(&cpu_buf->lock);
    if (cpu_buf->block) {
        print("Core %d has data\n", core);
        qibwrite(opq, cpu_buf->block);
        print("After qibwrite in %s, opq len %d\n", __func__, qlen(opq));
    }
    if (newbuf)
        cpu_buf->block = iallocb(oprofile_cpu_buffer_size);
    else
        cpu_buf->block = nil;
    unlock(&cpu_buf->lock);
    //print_func_exit();
}

void oprofile_cpubuf_flushall(int alloc)
{
    //print_func_entry();
    int core;

    for (core = 0; core < num_cpus; core++) {
        oprofile_cpubuf_flushone(core, alloc);
    }
    //print_func_exit();
}
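
/* Turn tracing on or off for every core. When turning it off, flush
 * whatever each core has buffered so far.
 */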
void oprofile_control_trace(int onoff)
{
    //print_func_entry();
    int core;
    struct oprofile_cpu_buffer *cpu_buf;

    for (core = 0; core < num_cpus; core++) {
        cpu_buf = &op_cpu_buffer[core];
        cpu_buf->tracing = onoff;

        if (onoff) {
            print("Enable tracing on cpu %d\n", core);
            continue;
        }

        /* halting. Force out all buffers. */
        oprofile_cpubuf_flushone(core, 0);
    }
    //print_func_exit();
}
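
/* Common helper for the oprofile_add_*sample() entry points: log the
 * sample and, if enabled, a backtrace (backtracing is stubbed out for now).
 */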
static inline void
__oprofile_add_ext_sample(unsigned long pc,
                          void /*struct pt_regs */ *const regs,
                          unsigned long event, int is_kernel, Proc *proc)
{
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];
    unsigned long backtrace = oprofile_backtrace_depth;

    /*
     * if log_sample() fails we can't backtrace since we lost the
     * source of this event
     */
    if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, proc)) {
        /* failed */
        //print_func_exit();
        return;
    }

    if (!backtrace) {
        //print_func_exit();
        return;
    }
#if 0
    oprofile_begin_trace(cpu_buf);
    oprofile_ops.backtrace(regs, backtrace);
    oprofile_end_trace(cpu_buf);
#endif
    //print_func_exit();
}

void oprofile_add_ext_hw_sample(unsigned long pc,
                                Ureg *regs,
                                unsigned long event, int is_kernel,
                                Proc *proc)
{
    //print_func_entry();
    __oprofile_add_ext_sample(pc, regs, event, is_kernel, proc);
    //print_func_exit();
}

void oprofile_add_ext_sample(unsigned long pc,
                             void /*struct pt_regs */ *const regs,
                             unsigned long event, int is_kernel)
{
    //print_func_entry();
    __oprofile_add_ext_sample(pc, regs, event, is_kernel, nil);
    //print_func_exit();
}

void oprofile_add_sample(void /*struct pt_regs */ *const regs,
                         unsigned long event)
{
    //print_func_entry();
    int is_kernel;
    unsigned long pc;

    if (regs) {
        is_kernel = 0;        // FIXME!user_mode(regs);
        pc = 0;               // FIXME profile_pc(regs);
    } else {
        is_kernel = 0;        /* This value will not be used */
        pc = ESCAPE_CODE;     /* as this causes an early return. */
    }

    __oprofile_add_ext_sample(pc, regs, event, is_kernel, nil);
    //print_func_exit();
}
/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry,
                       Ureg *regs,
                       unsigned long pc, int code, int size)
{
    Proc *up = externup();
    //print_func_entry();
    struct op_sample *sample;
    // Block *b; this gets some bizarre gcc set but not used error. Block *b;
    int is_kernel = 0;        // FIXME!user_mode(regs);
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

    if (waserror()) {
        print("%s: failed\n", __func__);
        poperror();
        goto fail;
    }

    cpu_buf->sample_received++;

    /* no backtraces for samples with data */
    if (op_add_code(cpu_buf, 0, is_kernel, externup())) {
        poperror();
        goto fail;
    }

    if (!op_cpu_buffer_write_reserve(cpu_buf, entry, size + 2)) {
        poperror();
        goto fail;
    }
    sample = entry->sample;
    sample->eip = ESCAPE_CODE;
    sample->event = 0;        /* no flags */

    op_cpu_buffer_add_data(entry, code);
    op_cpu_buffer_add_data(entry, pc);

    poperror();
    //print_func_exit();
    return;

fail:
    entry->event = nil;
    cpu_buf->sample_lost_overflow++;
    //print_func_exit();
}
int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
    //print_func_entry();
    if (!entry->event) {
        //print_func_exit();
        return 0;
    }
    //print_func_exit();
    return op_cpu_buffer_add_data(entry, val);
}
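
/* Add a 64-bit value as two 32-bit data words, low word first.
 * Returns 0 if the entry lacks room for both words.
 */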
int oprofile_add_data64(struct op_entry *entry, uint64_t val)
{
    //print_func_entry();
    if (!entry->event) {
        //print_func_exit();
        return 0;
    }
    if (op_cpu_buffer_get_size(entry) < 2) {
        /*
         * the function returns 0 to indicate a too small
         * buffer, even if there is some space left
         */
        //print_func_exit();
        return 0;
    }
    if (!op_cpu_buffer_add_data(entry, (uint32_t) val)) {
        //print_func_exit();
        return 0;
    }
    //print_func_exit();
    return op_cpu_buffer_add_data(entry, (uint32_t) (val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
    //print_func_entry();
    /* not much to do at present. In future, we might write the Block
     * to opq.
     */
    //print_func_exit();
    return 0;
}

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

    log_sample(cpu_buf, pc, 0, is_kernel, event, nil);
    //print_func_exit();
}
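
/* Record one backtrace PC for the current core, timestamped in ns.
 * Tracing is disabled for this core on any failure.
 */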
void oprofile_add_trace(unsigned long pc)
{
    if (!op_cpu_buffer)
        return;
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

    if (!cpu_buf->tracing) {
        //print_func_exit();
        return;
    }

    /*
     * broken frame can give an eip with the same value as an
     * escape code, abort the trace if we get it
     */
    if (pc == ESCAPE_CODE)
        goto fail;

    if (op_add_sample(cpu_buf, pc, fastticks2ns(rdtsc())))
        goto fail;

    //print_func_exit();
    return;

fail:
    print("%s: fail. Turning off tracing on cpu %d\n", __func__, machp()->machno);
    cpu_buf->tracing = 0;
    cpu_buf->backtrace_aborted++;
    //print_func_exit();
    return;
}
/* Format for samples:
 * first word:
 *   high 8 bits is ee, which is an invalid address on amd64.
 *   next 8 bits is protocol version
 *   next 16 bits is unused, MBZ. Later, we can make it a packet type.
 *   next 16 bits is core id
 *   next 8 bits is unused
 *   next 8 bits is # PCs following. This should be at least 1, for one EIP.
 *
 * second word is time in ns.
 *
 * Third and following words are PCs, there must be at least one of them.
 */
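/* For example, with protocol version 1, core 3 reporting 5 PCs gives a
 * first word of 0xee01000000030005.
 */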
void oprofile_add_backtrace(uintptr_t pc, uintptr_t fp)
{
    /* version 1. */
    uint64_t descriptor = 0xee01ULL << 48;

    if (!op_cpu_buffer)
        return;
    //print_func_entry();
    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];

    if (!cpu_buf->tracing) {
        //print_func_exit();
        return;
    }

    struct op_entry entry;
    struct op_sample *sample;
    // Block *b; this gets some bizarre gcc set but not used error. Block *b;
    uint64_t event = fastticks2ns(rdtsc());

    uintptr_t bt_pcs[oprofile_backtrace_depth];
    int nr_pcs;

    nr_pcs = backtrace_list(pc, fp, bt_pcs, oprofile_backtrace_depth);

    /* write_reserve always assumes passed-in-size + 2.
     * backtrace_depth should always be > 0.
     */
    if (!op_cpu_buffer_write_reserve(cpu_buf, &entry, nr_pcs))
        return;

    /* we are changing the sample format, but not the struct
     * member names yet. Later, assuming this works out.
     */
    descriptor |= (machp()->machno << 16) | nr_pcs;
    sample = entry.sample;
    sample->eip = descriptor;
    sample->event = event;
    memmove(sample->data, bt_pcs, sizeof(uintptr_t) * nr_pcs);
    //print_func_exit();
    return;
}
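
/* Record a single user-space PC for the current core, using the same
 * descriptor format as oprofile_add_backtrace() with exactly one PC.
 */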
void oprofile_add_userpc(uintptr_t pc)
{
    struct oprofile_cpu_buffer *cpu_buf;
    uint32_t pcoreid = machp()->machno;
    struct op_entry entry;
    // Block *b; this gets some bizarre gcc set but not used error.
    uint64_t descriptor = (0xee01ULL << 48) | (pcoreid << 16) | 1;

    if (!op_cpu_buffer)
        return;
    cpu_buf = &op_cpu_buffer[pcoreid];
    if (!cpu_buf->tracing)
        return;

    /* write_reserve always assumes passed-in-size + 2. need room for 1 PC. */
    Block *b = op_cpu_buffer_write_reserve(cpu_buf, &entry, 1);
    if (!b)
        return;
    entry.sample->eip = descriptor;
    entry.sample->event = fastticks2ns(rdtsc());
    /* entry.sample->data == entry.data */
    assert(entry.sample->data == entry.data);
    *entry.sample->data = pc;
}
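
/* Number of bytes of profiling data currently queued for readers. */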
int
oproflen(void)
{
    return qlen(opq);
}
/* return # bytes read, or 0 if profiling is off, or block if profiling on and no data.
 */
int
oprofread(void *va, int n)
{
    int len = qlen(opq);

    if (!len)
        return 0;

    struct oprofile_cpu_buffer *cpu_buf = &op_cpu_buffer[machp()->machno];
    if (cpu_buf->tracing == 1) {
        error("can't read profiling while trace is running");
        return 0;
    }
    len = qread(opq, va, n);
    return len;
}