mpstat.c

/* vi: set sw=4 ts=4: */
/*
 * Per-processor statistics, based on sysstat version 9.1.2 by Sebastien Godard
 *
 * Copyright (C) 2010 Marek Polacek <mmpolacek@gmail.com>
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */
//config:config MPSTAT
//config:	bool "mpstat (10 kb)"
//config:	default y
//config:	help
//config:	Per-processor statistics

//applet:IF_MPSTAT(APPLET(mpstat, BB_DIR_BIN, BB_SUID_DROP))
/* shouldn't be noexec: "mpstat INTERVAL" runs indefinitely */

//kbuild:lib-$(CONFIG_MPSTAT) += mpstat.o

#include "libbb.h"
#include <sys/utsname.h>  /* struct utsname */

//#define debug(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__)
#define debug(fmt, ...) ((void)0)

/* Size of /proc/interrupts line, CPU data excluded */
#define INTERRUPTS_LINE 64
/* Maximum number of interrupts */
#define NR_IRQS 256
#define NR_IRQCPU_PREALLOC 3
#define MAX_IRQNAME_LEN 16
#define MAX_PF_NAME 512
/* sysstat 9.0.6 uses width 8, but newer code which also prints /proc/softirqs
 * data needs more: "interrupts" in /proc/softirqs have longer names,
 * most are up to 8 chars, one (BLOCK_IOPOLL) is even longer.
 * We are printing headers in the " IRQNAME/s" form, experimentally
 * anything smaller than 10 chars looks ugly for /proc/softirqs stats.
 */
#define INTRATE_SCRWIDTH      10
#define INTRATE_SCRWIDTH_STR "10"

/* System files */
#define PROCFS_STAT       "/proc/stat"
#define PROCFS_INTERRUPTS "/proc/interrupts"
#define PROCFS_SOFTIRQS   "/proc/softirqs"
#define PROCFS_UPTIME     "/proc/uptime"

#if 1
typedef unsigned long long data_t;
typedef long long idata_t;
#define FMT_DATA "ll"
#define DATA_MAX ULLONG_MAX
#else
typedef unsigned long data_t;
typedef long idata_t;
#define FMT_DATA "l"
#define DATA_MAX ULONG_MAX
#endif

struct stats_irqcpu {
	unsigned interrupts;
	char irq_name[MAX_IRQNAME_LEN];
};

struct stats_cpu {
	data_t cpu_user;
	data_t cpu_nice;
	data_t cpu_system;
	data_t cpu_idle;
	data_t cpu_iowait;
	data_t cpu_steal;
	data_t cpu_irq;
	data_t cpu_softirq;
	data_t cpu_guest;
};

struct stats_irq {
	data_t irq_nr;
};

/* Globals. Sort by size and access frequency. */
struct globals {
	int interval;
	int count;
	unsigned cpu_nr;            /* Number of CPUs */
	unsigned irqcpu_nr;         /* Number of interrupts per CPU */
	unsigned softirqcpu_nr;     /* Number of soft interrupts per CPU */
	unsigned options;
	unsigned hz;
	unsigned cpu_bitmap_len;
	smallint p_option;
	// 9.0.6 does not do it. Try "mpstat -A 1 2" - headers are repeated!
	//smallint header_done;
	//smallint avg_header_done;
	unsigned char *cpu_bitmap;  /* Bit 0: global, bit 1: 1st proc... */
	data_t global_uptime[3];
	data_t per_cpu_uptime[3];
	struct stats_cpu *st_cpu[3];
	struct stats_irq *st_irq[3];
	struct stats_irqcpu *st_irqcpu[3];
	struct stats_irqcpu *st_softirqcpu[3];
	struct tm timestamp[3];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
	SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)

/* The selected interrupts statistics (bits in G.options) */
enum {
	D_CPU      = 1 << 0,
	D_IRQ_SUM  = 1 << 1,
	D_IRQ_CPU  = 1 << 2,
	D_SOFTIRQS = 1 << 3,
};

/* Is option on? */
static ALWAYS_INLINE int display_opt(int opt)
{
	return (opt & G.options);
}

#if DATA_MAX > 0xffffffff
/*
 * Handle overflow conditions properly for counters which can have
 * fewer bits than data_t, depending on the kernel version.
 */
/* Surprisingly, on 32bit inlining is a size win */
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
	data_t v = curr - prev;

	if ((idata_t)v < 0     /* curr < prev - counter overflow? */
	 && prev <= 0xffffffff /* kernel uses 32bit value for the counter? */
	) {
		/* Add 33rd bit set to 1 to curr, compensating for the overflow */
		/* double shift defeats "warning: left shift count >= width of type" */
		v += ((data_t)1 << 16) << 16;
	}
	return v;
}
#else
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
	return curr - prev;
}
#endif
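
/* Counter delta expressed as a percentage of the interval itv (both in jiffies) */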
static double percent_value(data_t prev, data_t curr, data_t itv)
{
	return ((double)overflow_safe_sub(prev, curr)) / itv * 100;
}
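
/* Counter delta converted to an events-per-second rate (itv is in jiffies) */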
static double hz_value(data_t prev, data_t curr, data_t itv)
{
	//bb_error_msg("curr:%lld prev:%lld G.hz:%u", curr, prev, G.hz);
	return ((double)overflow_safe_sub(prev, curr)) / itv * G.hz;
}
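
/* Elapsed jiffies between two uptime readings; never returns 0,
 * so callers can divide by the result safely */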
static ALWAYS_INLINE data_t jiffies_diff(data_t old, data_t new)
{
	data_t diff = new - old;
	return (diff == 0) ? 1 : diff;
}
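
/* Was this CPU selected for reporting? Bit 0 is the "all CPUs" summary,
 * bit N+1 corresponds to CPU N (see G.cpu_bitmap) */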
static int is_cpu_in_bitmap(unsigned cpu)
{
	return G.cpu_bitmap[cpu >> 3] & (1 << (cpu & 7));
}
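
/* Print per-CPU interrupt rates collected from /proc/interrupts
 * or /proc/softirqs (one column per interrupt name) */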
static void write_irqcpu_stats(struct stats_irqcpu *per_cpu_stats[],
		int total_irqs,
		data_t itv,
		int prev, int current,
		const char *prev_str, const char *current_str)
{
	int j;
	int offset, cpu;
	struct stats_irqcpu *p0, *q0;

	/* Check if number of IRQs has changed */
	if (G.interval != 0) {
		for (j = 0; j <= total_irqs; j++) {
			p0 = &per_cpu_stats[current][j];
			if (p0->irq_name[0] != '\0') {
				q0 = &per_cpu_stats[prev][j];
				if (strcmp(p0->irq_name, q0->irq_name) != 0) {
					/* Strings are different */
					break;
				}
			}
		}
	}

	/* Print header */
	printf("\n%-11s  CPU", prev_str);
	{
		/* A bit complex code to "buy back" space if one header is too wide.
		 * Here's how it looks like. BLOCK_IOPOLL eats too much space,
		 * and latter headers use smaller width to compensate:
		 * ...BLOCK/s BLOCK_IOPOLL/s TASKLET/s SCHED/s HRTIMER/s RCU/s
		 * ...   2.32           0.00      0.01   17.58      0.14 141.96
		 */
		int expected_len = 0;
		int printed_len = 0;
		for (j = 0; j < total_irqs; j++) {
			p0 = &per_cpu_stats[current][j];
			if (p0->irq_name[0] != '\0') {
				int n = (INTRATE_SCRWIDTH-3) - (printed_len - expected_len);
				printed_len += printf(" %*s/s", n > 0 ? n : 0, skip_whitespace(p0->irq_name));
				expected_len += INTRATE_SCRWIDTH;
			}
		}
	}
	bb_putchar('\n');

	for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
		/* Check if we want stats about this CPU */
		if (!is_cpu_in_bitmap(cpu) && G.p_option) {
			continue;
		}

		printf("%-11s %4u", current_str, cpu - 1);

		for (j = 0; j < total_irqs; j++) {
			/* IRQ field set only for proc 0 */
			p0 = &per_cpu_stats[current][j];
			/*
			 * An empty string for irq name means that
			 * interrupt is no longer used.
			 */
			if (p0->irq_name[0] != '\0') {
				offset = j;
				q0 = &per_cpu_stats[prev][offset];
				/*
				 * If we want stats for the time since boot
				 * we have p0->irq != q0->irq.
				 */
				if (strcmp(p0->irq_name, q0->irq_name) != 0
				 && G.interval != 0
				) {
					if (j) {
						offset = j - 1;
						q0 = &per_cpu_stats[prev][offset];
					}
					if (strcmp(p0->irq_name, q0->irq_name) != 0
					 && (j + 1 < total_irqs)
					) {
						offset = j + 1;
						q0 = &per_cpu_stats[prev][offset];
					}
				}
				if (strcmp(p0->irq_name, q0->irq_name) == 0
				 || G.interval == 0
				) {
					struct stats_irqcpu *p, *q;
					p = &per_cpu_stats[current][(cpu - 1) * total_irqs + j];
					q = &per_cpu_stats[prev][(cpu - 1) * total_irqs + offset];
					printf("%"INTRATE_SCRWIDTH_STR".2f",
						(double)(p->interrupts - q->interrupts) / itv * G.hz);
				} else {
					printf("       N/A");
				}
			}
		}
		bb_putchar('\n');
	}
}
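
/* Elapsed jiffies for one CPU: sum of the deltas of all its counters
 * (guest time excluded, it is already included in user time) */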
static data_t get_per_cpu_interval(const struct stats_cpu *scc,
		const struct stats_cpu *scp)
{
	return ((scc->cpu_user + scc->cpu_nice +
		 scc->cpu_system + scc->cpu_iowait +
		 scc->cpu_idle + scc->cpu_steal +
		 scc->cpu_irq + scc->cpu_softirq) -
		(scp->cpu_user + scp->cpu_nice +
		 scp->cpu_system + scp->cpu_iowait +
		 scp->cpu_idle + scp->cpu_steal +
		 scp->cpu_irq + scp->cpu_softirq));
}

static void print_stats_cpu_struct(const struct stats_cpu *p,
		const struct stats_cpu *c,
		data_t itv)
{
	printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
		percent_value(p->cpu_user - p->cpu_guest,
		/**/          c->cpu_user - c->cpu_guest, itv),
		percent_value(p->cpu_nice   , c->cpu_nice   , itv),
		percent_value(p->cpu_system , c->cpu_system , itv),
		percent_value(p->cpu_iowait , c->cpu_iowait , itv),
		percent_value(p->cpu_irq    , c->cpu_irq    , itv),
		percent_value(p->cpu_softirq, c->cpu_softirq, itv),
		percent_value(p->cpu_steal  , c->cpu_steal  , itv),
		percent_value(p->cpu_guest  , c->cpu_guest  , itv),
		percent_value(p->cpu_idle   , c->cpu_idle   , itv)
	);
}
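
/* Print one report, comparing sample slot 'prev' against slot 'current'
 * (slot 2 holds the baseline sample used for the "Average:" report) */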
static void write_stats_core(int prev, int current,
		const char *prev_str, const char *current_str)
{
	struct stats_cpu *scc, *scp;
	data_t itv, global_itv;
	int cpu;

	/* Compute time interval */
	itv = global_itv = jiffies_diff(G.global_uptime[prev], G.global_uptime[current]);

	/* Reduce interval to one CPU */
	if (G.cpu_nr > 1)
		itv = jiffies_diff(G.per_cpu_uptime[prev], G.per_cpu_uptime[current]);

	/* Print CPU stats */
	if (display_opt(D_CPU)) {
		///* This is done exactly once */
		//if (!G.header_done) {
		printf("\n%-11s  CPU    %%usr   %%nice    %%sys %%iowait    %%irq   %%soft  %%steal  %%guest   %%idle\n",
			prev_str
		);
		//	G.header_done = 1;
		//}
		for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
			data_t per_cpu_itv;

			/* Print stats about this particular CPU? */
			if (!is_cpu_in_bitmap(cpu))
				continue;

			scc = &G.st_cpu[current][cpu];
			scp = &G.st_cpu[prev][cpu];
			per_cpu_itv = global_itv;

			printf((cpu ? "%-11s %4u" : "%-11s  all"), current_str, cpu - 1);
			if (cpu) {
				double idle;
				/*
				 * If the CPU is offline, then it isn't in /proc/stat,
				 * so all values are 0.
				 * NB: Guest time is already included in user time.
				 */
				if ((scc->cpu_user | scc->cpu_nice | scc->cpu_system |
				     scc->cpu_iowait | scc->cpu_idle | scc->cpu_steal |
				     scc->cpu_irq | scc->cpu_softirq) == 0
				) {
					/*
					 * Set current struct fields to values from prev.
					 * iteration. Then their values won't jump from
					 * zero, when the CPU comes back online.
					 */
					*scc = *scp;
					idle = 0.0;
					goto print_zeros;
				}
				/* Compute interval again for current proc */
				per_cpu_itv = get_per_cpu_interval(scc, scp);
				if (per_cpu_itv == 0) {
					/*
					 * If the CPU is tickless then there is no change in CPU values
					 * but the sum of values is not zero.
					 */
					idle = 100.0;
 print_zeros:
					printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
						0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, idle);
					continue;
				}
			}
			print_stats_cpu_struct(scp, scc, per_cpu_itv);
		}
	}

	/* Print total number of IRQs per CPU */
	if (display_opt(D_IRQ_SUM)) {
		///* Print average header, this is done exactly once */
		//if (!G.avg_header_done) {
		printf("\n%-11s  CPU    intr/s\n", prev_str);
		//	G.avg_header_done = 1;
		//}
		for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
			data_t per_cpu_itv;

			/* Print stats about this CPU? */
			if (!is_cpu_in_bitmap(cpu))
				continue;

			per_cpu_itv = itv;
			printf((cpu ? "%-11s %4u" : "%-11s  all"), current_str, cpu - 1);
			if (cpu) {
				scc = &G.st_cpu[current][cpu];
				scp = &G.st_cpu[prev][cpu];
				/* Compute interval again for current proc */
				per_cpu_itv = get_per_cpu_interval(scc, scp);
				if (per_cpu_itv == 0) {
					printf(" %9.2f\n", 0.0);
					continue;
				}
			}
			//bb_error_msg("G.st_irq[%u][%u].irq_nr:%lld - G.st_irq[%u][%u].irq_nr:%lld",
			//	current, cpu, G.st_irq[prev][cpu].irq_nr, prev, cpu, G.st_irq[current][cpu].irq_nr);
			printf(" %9.2f\n", hz_value(G.st_irq[prev][cpu].irq_nr, G.st_irq[current][cpu].irq_nr, per_cpu_itv));
		}
	}

	if (display_opt(D_IRQ_CPU)) {
		write_irqcpu_stats(G.st_irqcpu, G.irqcpu_nr,
				itv,
				prev, current,
				prev_str, current_str
		);
	}

	if (display_opt(D_SOFTIRQS)) {
		write_irqcpu_stats(G.st_softirqcpu, G.softirqcpu_nr,
				itv,
				prev, current,
				prev_str, current_str
		);
	}
}

/*
 * Print the statistics
 */
static void write_stats(int current)
{
	char prev_time[16];
	char curr_time[16];

	strftime(prev_time, sizeof(prev_time), "%X", &G.timestamp[!current]);
	strftime(curr_time, sizeof(curr_time), "%X", &G.timestamp[current]);

	write_stats_core(!current, current, prev_time, curr_time);
}

static void write_stats_avg(int current)
{
	write_stats_core(2, current, "Average:", "Average:");
}

/*
 * Read CPU statistics
 */
static void get_cpu_statistics(struct stats_cpu *cpu, data_t *up, data_t *up0)
{
	FILE *fp;
	char buf[1024];

	fp = xfopen_for_read(PROCFS_STAT);

	while (fgets(buf, sizeof(buf), fp)) {
		data_t sum;
		unsigned cpu_number;
		struct stats_cpu *cp;

		if (!starts_with_cpu(buf))
			continue; /* not "cpu" */

		cp = cpu; /* for "cpu " case */
		if (buf[3] != ' ') {
			/* "cpuN " */
			if (G.cpu_nr == 0
			 || sscanf(buf + 3, "%u ", &cpu_number) != 1
			 || cpu_number >= G.cpu_nr
			) {
				continue;
			}
			cp = &cpu[cpu_number + 1];
		}

		/* Read the counters, save them */
		/* Not all fields have to be present */
		memset(cp, 0, sizeof(*cp));
		sscanf(buf, "%*s"
			" %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
			" %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
			" %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u",
			&cp->cpu_user, &cp->cpu_nice, &cp->cpu_system,
			&cp->cpu_idle, &cp->cpu_iowait, &cp->cpu_irq,
			&cp->cpu_softirq, &cp->cpu_steal, &cp->cpu_guest
		);

		/*
		 * Compute uptime in jiffies (1/HZ), it'll be the sum of
		 * individual CPU's uptimes.
		 * NB: We have to omit cpu_guest, because cpu_user includes it.
		 */
		sum = cp->cpu_user + cp->cpu_nice + cp->cpu_system +
			cp->cpu_idle + cp->cpu_iowait + cp->cpu_irq +
			cp->cpu_softirq + cp->cpu_steal;

		if (buf[3] == ' ') {
			/* "cpu " */
			*up = sum;
		} else {
			/* "cpuN " */
			if (cpu_number == 0 && *up0 != 0) {
				/* Compute uptime of single CPU */
				*up0 = sum;
			}
		}
	}
	fclose(fp);
}

/*
 * Read IRQs from /proc/stat
 */
static void get_irqs_from_stat(struct stats_irq *irq)
{
	FILE *fp;
	char buf[1024];

	fp = xfopen_for_read(PROCFS_STAT);

	while (fgets(buf, sizeof(buf), fp)) {
		//bb_error_msg("/proc/stat:'%s'", buf);
		if (is_prefixed_with(buf, "intr ")) {
			/* Read total number of IRQs since system boot */
			sscanf(buf + 5, "%"FMT_DATA"u", &irq->irq_nr);
		}
	}
	fclose(fp);
}

/*
 * Read stats from /proc/interrupts or /proc/softirqs
 */
static void get_irqs_from_interrupts(const char *fname,
		struct stats_irqcpu *per_cpu_stats[],
		int irqs_per_cpu, int current)
{
	FILE *fp;
	struct stats_irq *irq_i;
	struct stats_irqcpu *ic;
	char *buf;
	unsigned buflen;
	unsigned cpu;
	unsigned irq;
	int cpu_index[G.cpu_nr];
	int iindex;

	// Moved to caller.
	// Otherwise reading of /proc/softirqs
	// was resetting counts to 0 after we painstakingly collected them from
	// /proc/interrupts. Which resulted in:
	// 01:32:47 PM  CPU    intr/s
	// 01:32:47 PM  all    591.47
	// 01:32:47 PM    0      0.00 <= ???
	// 01:32:47 PM    1      0.00 <= ???
	// for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
	//	G.st_irq[current][cpu].irq_nr = 0;
	//	//bb_error_msg("G.st_irq[%u][%u].irq_nr=0", current, cpu);
	// }

	fp = fopen_for_read(fname);
	if (!fp)
		return;

	buflen = INTERRUPTS_LINE + 16 * G.cpu_nr;
	buf = xmalloc(buflen);

	/* Parse header and determine, which CPUs are online */
	iindex = 0;
	while (fgets(buf, buflen, fp)) {
		char *cp, *next;
		next = buf;
		while ((cp = strstr(next, "CPU")) != NULL
		 && iindex < G.cpu_nr
		) {
			cpu = strtoul(cp + 3, &next, 10);
			cpu_index[iindex++] = cpu;
		}
		if (iindex) /* We found header */
			break;
	}

	irq = 0;
	while (fgets(buf, buflen, fp)
	 && irq < irqs_per_cpu
	) {
		int len;
		char last_char;
		char *cp;

		/* Skip over "IRQNAME:" */
		cp = strchr(buf, ':');
		if (!cp)
			continue;
		last_char = cp[-1];

		ic = &per_cpu_stats[current][irq];
		len = cp - buf;
		if (len >= sizeof(ic->irq_name)) {
			len = sizeof(ic->irq_name) - 1;
		}
		safe_strncpy(ic->irq_name, buf, len + 1);
		//bb_error_msg("%s: irq%d:'%s' buf:'%s'", fname, irq, ic->irq_name, buf);
		cp++;

		for (cpu = 0; cpu < iindex; cpu++) {
			char *next;
			ic = &per_cpu_stats[current][cpu_index[cpu] * irqs_per_cpu + irq];
			irq_i = &G.st_irq[current][cpu_index[cpu] + 1];
			ic->interrupts = strtoul(cp, &next, 10);
			/* Count only numerical IRQs */
			if (isdigit(last_char)) {
				irq_i->irq_nr += ic->interrupts;
				//bb_error_msg("G.st_irq[%u][%u].irq_nr + %u = %lld",
				//	current, cpu_index[cpu] + 1, ic->interrupts, irq_i->irq_nr);
			}
			cp = next;
		}
		irq++;
	}
	fclose(fp);
	free(buf);

	while (irq < irqs_per_cpu) {
		/* Number of interrupts per CPU has changed */
		ic = &per_cpu_stats[current][irq];
		ic->irq_name[0] = '\0'; /* False interrupt */
		irq++;
	}
}
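
/* Read system uptime from /proc/uptime ("seconds.centiseconds")
 * and convert it to jiffies */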
static void get_uptime(data_t *uptime)
{
	FILE *fp;
	char buf[sizeof(long)*3 * 2 + 4]; /* enough for long.long */
	unsigned long uptime_sec, decimal;

	fp = xfopen_for_read(PROCFS_UPTIME);
	if (fgets(buf, sizeof(buf), fp)) {
		if (sscanf(buf, "%lu.%lu", &uptime_sec, &decimal) == 2) {
			*uptime = (data_t)uptime_sec * G.hz + decimal * G.hz / 100;
		}
	}
	fclose(fp);
}

static void get_localtime(struct tm *tm)
{
	time_t timer;
	time(&timer);
	localtime_r(&timer, tm);
}
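
/* SIGALRM handler: re-arm the alarm so the main loop's pause()
 * wakes up every G.interval seconds */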
static void alarm_handler(int sig UNUSED_PARAM)
{
	signal(SIGALRM, alarm_handler);
	alarm(G.interval);
}
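
/* Take a baseline sample; then, unless INTERVAL is 0, wake up every
 * G.interval seconds, take a new sample and print the delta */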
static void main_loop(void)
{
	unsigned current;
	unsigned cpus;

	/* Read the stats */
	if (G.cpu_nr > 1) {
		G.per_cpu_uptime[0] = 0;
		get_uptime(&G.per_cpu_uptime[0]);
	}
	get_cpu_statistics(G.st_cpu[0], &G.global_uptime[0], &G.per_cpu_uptime[0]);

	if (display_opt(D_IRQ_SUM))
		get_irqs_from_stat(G.st_irq[0]);

	if (display_opt(D_IRQ_SUM | D_IRQ_CPU))
		get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
					G.irqcpu_nr, 0);

	if (display_opt(D_SOFTIRQS))
		get_irqs_from_interrupts(PROCFS_SOFTIRQS, G.st_softirqcpu,
					G.softirqcpu_nr, 0);

	if (G.interval == 0) {
		/* Display since boot time */
		cpus = G.cpu_nr + 1;
		G.timestamp[1] = G.timestamp[0];
		memset(G.st_cpu[1], 0, sizeof(G.st_cpu[1][0]) * cpus);
		memset(G.st_irq[1], 0, sizeof(G.st_irq[1][0]) * cpus);
		memset(G.st_irqcpu[1], 0, sizeof(G.st_irqcpu[1][0]) * cpus * G.irqcpu_nr);
		memset(G.st_softirqcpu[1], 0, sizeof(G.st_softirqcpu[1][0]) * cpus * G.softirqcpu_nr);
		write_stats(0);
		/* And we're done */
		return;
	}

	/* Set a handler for SIGALRM */
	alarm_handler(0);

	/* Save the stats we already have. We need them to compute the average */
	G.timestamp[2] = G.timestamp[0];
	G.global_uptime[2] = G.global_uptime[0];
	G.per_cpu_uptime[2] = G.per_cpu_uptime[0];
	cpus = G.cpu_nr + 1;
	memcpy(G.st_cpu[2], G.st_cpu[0], sizeof(G.st_cpu[0][0]) * cpus);
	memcpy(G.st_irq[2], G.st_irq[0], sizeof(G.st_irq[0][0]) * cpus);
	memcpy(G.st_irqcpu[2], G.st_irqcpu[0], sizeof(G.st_irqcpu[0][0]) * cpus * G.irqcpu_nr);
	if (display_opt(D_SOFTIRQS)) {
		memcpy(G.st_softirqcpu[2], G.st_softirqcpu[0],
			sizeof(G.st_softirqcpu[0][0]) * cpus * G.softirqcpu_nr);
	}

	current = 1;
	while (1) {
		/* Suspend until a signal is received */
		pause();

		/* Set structures to 0 to distinguish off/online CPUs */
		memset(&G.st_cpu[current][/*cpu:*/ 1], 0, sizeof(G.st_cpu[0][0]) * G.cpu_nr);
		get_localtime(&G.timestamp[current]);

		/* Read stats */
		if (G.cpu_nr > 1) {
			G.per_cpu_uptime[current] = 0;
			get_uptime(&G.per_cpu_uptime[current]);
		}
		get_cpu_statistics(G.st_cpu[current], &G.global_uptime[current], &G.per_cpu_uptime[current]);

		if (display_opt(D_IRQ_SUM))
			get_irqs_from_stat(G.st_irq[current]);

		if (display_opt(D_IRQ_SUM | D_IRQ_CPU)) {
			int cpu;
			for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
				G.st_irq[current][cpu].irq_nr = 0;
			}
			/* accumulates .irq_nr */
			get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
					G.irqcpu_nr, current);
		}

		if (display_opt(D_SOFTIRQS))
			get_irqs_from_interrupts(PROCFS_SOFTIRQS,
					G.st_softirqcpu,
					G.softirqcpu_nr, current);

		write_stats(current);

		if (G.count > 0) {
			if (--G.count == 0)
				break;
		}

		current ^= 1;
	}

	/* Print average statistics */
	write_stats_avg(current);
}

/* Initialization */
static void alloc_struct(int cpus)
{
	int i;
	for (i = 0; i < 3; i++) {
		G.st_cpu[i] = xzalloc(sizeof(G.st_cpu[i][0]) * cpus);
		G.st_irq[i] = xzalloc(sizeof(G.st_irq[i][0]) * cpus);
		G.st_irqcpu[i] = xzalloc(sizeof(G.st_irqcpu[i][0]) * cpus * G.irqcpu_nr);
		G.st_softirqcpu[i] = xzalloc(sizeof(G.st_softirqcpu[i][0]) * cpus * G.softirqcpu_nr);
	}
	G.cpu_bitmap_len = (cpus >> 3) + 1;
	G.cpu_bitmap = xzalloc(G.cpu_bitmap_len);
}

static void print_header(struct tm *t)
{
	char cur_date[16];
	struct utsname uts;

	/* Get system name, release number and hostname */
	uname(&uts);

	strftime(cur_date, sizeof(cur_date), "%x", t);
	printf("%s %s (%s)\t%s\t_%s_\t(%u CPU)\n",
		uts.sysname, uts.release, uts.nodename, cur_date, uts.machine, G.cpu_nr);
}

/*
 * Get number of interrupts available per processor
 */
static int get_irqcpu_nr(const char *f, int max_irqs)
{
	FILE *fp;
	char *line;
	unsigned linelen;
	unsigned irq;

	fp = fopen_for_read(f);
	if (!fp) /* No interrupts file */
		return 0;

	linelen = INTERRUPTS_LINE + 16 * G.cpu_nr;
	line = xmalloc(linelen);

	irq = 0;
	while (fgets(line, linelen, fp)
	 && irq < max_irqs
	) {
		int p = strcspn(line, ":");
		if ((p > 0) && (p < 16))
			irq++;
	}
	fclose(fp);
	free(line);

	return irq;
}

//usage:#define mpstat_trivial_usage
//usage:       "[-A] [-I SUM|CPU|ALL|SCPU] [-u] [-P num|ALL] [INTERVAL [COUNT]]"
//usage:#define mpstat_full_usage "\n\n"
//usage:       "Per-processor statistics\n"
//usage:     "\n	-A			Same as -I ALL -u -P ALL"
//usage:     "\n	-I SUM|CPU|ALL|SCPU	Report interrupt statistics"
//usage:     "\n	-P num|ALL		Processor to monitor"
//usage:     "\n	-u			Report CPU utilization"

int mpstat_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mpstat_main(int argc UNUSED_PARAM, char **argv)
{
	char *opt_irq_fmt;
	char *opt_set_cpu;
	int i, opt;
	enum {
		OPT_ALL    = 1 << 0, /* -A */
		OPT_INTS   = 1 << 1, /* -I */
		OPT_SETCPU = 1 << 2, /* -P */
		OPT_UTIL   = 1 << 3, /* -u */
	};

	/* Don't buffer data if redirected to a pipe */
	setbuf(stdout, NULL);

	INIT_G();
	G.interval = -1;

	/* Get number of processors */
	G.cpu_nr = get_cpu_count();

	/* Get number of clock ticks per sec */
	G.hz = bb_clk_tck();

	/* Calculate number of interrupts per processor */
	G.irqcpu_nr = get_irqcpu_nr(PROCFS_INTERRUPTS, NR_IRQS) + NR_IRQCPU_PREALLOC;

	/* Calculate number of soft interrupts per processor */
	G.softirqcpu_nr = get_irqcpu_nr(PROCFS_SOFTIRQS, NR_IRQS) + NR_IRQCPU_PREALLOC;

	/* Allocate space for structures. + 1 for global structure. */
	alloc_struct(G.cpu_nr + 1);

	/* Parse and process arguments */
	opt = getopt32(argv, "AI:P:u", &opt_irq_fmt, &opt_set_cpu);
	argv += optind;

	if (*argv) {
		/* Get interval */
		G.interval = xatoi_positive(*argv);
		G.count = -1;
		argv++;
		if (*argv) {
			/* Get count value */
			if (G.interval == 0)
				bb_show_usage();
			G.count = xatoi_positive(*argv);
			//if (*++argv)
			//	bb_show_usage();
		}
	}
	if (G.interval < 0)
		G.interval = 0;

	if (opt & OPT_ALL) {
		G.p_option = 1;
		G.options |= D_CPU + D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS;
		/* Select every CPU */
		memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
	}

	if (opt & OPT_INTS) {
		static const char v[] = {
			D_IRQ_CPU, D_IRQ_SUM, D_SOFTIRQS,
			D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS
		};
		i = index_in_strings("CPU\0SUM\0SCPU\0ALL\0", opt_irq_fmt);
		if (i == -1)
			bb_show_usage();
		G.options |= v[i];
	}

	if ((opt & OPT_UTIL) /* -u? */
	 || G.options == 0   /* nothing? (use default then) */
	) {
		G.options |= D_CPU;
	}

	if (opt & OPT_SETCPU) {
		char *t;
		G.p_option = 1;

		for (t = strtok_r(opt_set_cpu, ",", &opt_set_cpu); t; t = strtok_r(NULL, ",", &opt_set_cpu)) {
			if (strcmp(t, "ALL") == 0) {
				/* Select every CPU */
				memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
			} else {
				/* Get CPU number */
				unsigned n = xatoi_positive(t);
				if (n >= G.cpu_nr)
					bb_simple_error_msg_and_die("not that many processors");
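				/* Bit 0 is the "all CPUs" summary, so CPU n maps to bit n+1 */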
				n++;
				G.cpu_bitmap[n >> 3] |= 1 << (n & 7);
			}
		}
	}

	if (!G.p_option)
		/* Display global stats */
		G.cpu_bitmap[0] = 1;

	/* Get time */
	get_localtime(&G.timestamp[0]);

	/* Display header */
	print_header(&G.timestamp[0]);

	/* The main loop */
	main_loop();

	if (ENABLE_FEATURE_CLEAN_UP) {
		/* Clean up */
		for (i = 0; i < 3; i++) {
			free(G.st_cpu[i]);
			free(G.st_irq[i]);
			free(G.st_irqcpu[i]);
			free(G.st_softirqcpu[i]);
		}
		free(G.cpu_bitmap);
		free(&G);
	}

	return EXIT_SUCCESS;
}