mpstat.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017
  1. /* vi: set sw=4 ts=4: */
  2. /*
  3. * Per-processor statistics, based on sysstat version 9.1.2 by Sebastien Godard
  4. *
  5. * Copyright (C) 2010 Marek Polacek <mmpolacek@gmail.com>
  6. *
  7. * Licensed under GPLv2, see file License in this tarball for details.
  8. */
  9. //applet:IF_MPSTAT(APPLET(mpstat, _BB_DIR_BIN, _BB_SUID_DROP))
  10. //kbuild:lib-$(CONFIG_MPSTAT) += mpstat.o
  11. //config:config MPSTAT
  12. //config: bool "mpstat"
  13. //config: default y
  14. //config: help
  15. //config: Per-processor statistics
  16. #include "libbb.h"
  17. #include <sys/utsname.h> /* struct utsname */
  18. //#define debug(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__)
  19. #define debug(fmt, ...) ((void)0)
  20. /* Size of /proc/interrupts line, CPU data excluded */
  21. #define INTERRUPTS_LINE 64
  22. /* Maximum number of interrupts */
  23. #define NR_IRQS 256
  24. #define NR_IRQCPU_PREALLOC 3
  25. #define MAX_IRQNAME_LEN 16
  26. #define MAX_PF_NAME 512
  27. /* sysstat 9.0.6 uses width 8, but newer code which also prints /proc/softirqs
  28. * data needs more: "interrupts" in /proc/softirqs have longer names,
  29. * most are up to 8 chars, one (BLOCK_IOPOLL) is even longer.
  30. * We are printing headers in the " IRQNAME/s" form, experimentally
  31. * anything smaller than 10 chars looks ugly for /proc/softirqs stats.
  32. */
  33. #define INTRATE_SCRWIDTH 10
  34. #define INTRATE_SCRWIDTH_STR "10"
  35. /* System files */
  36. #define SYSFS_DEVCPU "/sys/devices/system/cpu"
  37. #define PROCFS_STAT "/proc/stat"
  38. #define PROCFS_INTERRUPTS "/proc/interrupts"
  39. #define PROCFS_SOFTIRQS "/proc/softirqs"
  40. #define PROCFS_UPTIME "/proc/uptime"
  41. #if 1
  42. typedef unsigned long long data_t;
  43. typedef long long idata_t;
  44. #define FMT_DATA "ll"
  45. #define DATA_MAX ULLONG_MAX
  46. #else
  47. typedef unsigned long data_t;
  48. typedef long idata_t;
  49. #define FMT_DATA "l"
  50. #define DATA_MAX ULONG_MAX
  51. #endif
/* One interrupt-counter cell parsed from /proc/interrupts or /proc/softirqs */
struct stats_irqcpu {
	unsigned interrupt;             /* counter value for one CPU column */
	char irq_name[MAX_IRQNAME_LEN]; /* IRQ name before ':' (empty string = unused slot) */
};
/* Per-CPU tick counters from a /proc/stat "cpu"/"cpuN" line, in jiffies.
 * NB: cpu_guest is already included in cpu_user (see get_cpu_statistics). */
struct stats_cpu {
	data_t cpu_user;
	data_t cpu_nice;
	data_t cpu_system;
	data_t cpu_idle;
	data_t cpu_iowait;
	data_t cpu_steal;
	data_t cpu_irq;
	data_t cpu_softirq;
	data_t cpu_guest;
};
/* Total interrupt count for one CPU (accumulated across IRQ lines) */
struct stats_irq {
	data_t irq_nr;
};
/* Globals. Sort by size and access frequency. */
struct globals {
	int interval;           /* seconds between samples; 0 = since-boot mode */
	int count;              /* remaining samples; <0 = unlimited */
	unsigned cpu_nr;        /* Number of CPUs */
	unsigned irqcpu_nr;     /* Number of interrupts per CPU */
	unsigned softirqcpu_nr; /* Number of soft interrupts per CPU */
	unsigned options;       /* D_xxx display bits, see enum below */
	unsigned hz;            /* kernel clock ticks per second */
	unsigned cpu_bitmap_len;
	smallint p_option;      /* -P/-A given: filter output by cpu_bitmap */
	// 9.0.6 does not do it. Try "mpstat -A 1 2" - headers are repeated!
	//smallint header_done;
	//smallint avg_header_done;
	unsigned char *cpu_bitmap; /* Bit 0: global, bit 1: 1st proc... */
	/* Index convention for the [3] arrays below:
	 * 0/1 alternate as previous/current sample, 2 holds the very first
	 * snapshot used as the baseline for the "Average:" report. */
	data_t global_uptime[3];
	data_t per_cpu_uptime[3];
	struct stats_cpu *st_cpu[3];
	struct stats_irq *st_irq[3];
	struct stats_irqcpu *st_irqcpu[3];
	struct stats_irqcpu *st_softirqcpu[3];
	struct tm timestamp[3];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
	SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)
/* The selected interrupts statistics (bits in G.options) */
enum {
	D_CPU      = 1 << 0, /* CPU utilization percentages (-u) */
	D_IRQ_SUM  = 1 << 1, /* total intr/s per CPU (-I SUM) */
	D_IRQ_CPU  = 1 << 2, /* per-IRQ rates from /proc/interrupts (-I CPU) */
	D_SOFTIRQS = 1 << 3, /* per-IRQ rates from /proc/softirqs (-I SCPU) */
};
  104. /* Does str start with "cpu"? */
  105. static int starts_with_cpu(const char *str)
  106. {
  107. return !((str[0] - 'c') | (str[1] - 'p') | (str[2] - 'u'));
  108. }
  109. /* Is option on? */
  110. static ALWAYS_INLINE int display_opt(int opt)
  111. {
  112. return (opt & G.options);
  113. }
#if DATA_MAX > 0xffffffff
/*
 * Handle overflow conditions properly for counters which can have
 * less bits than data_t, depending on the kernel version.
 */
/* Surprisingly, on 32bit inlining is a size win */
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
	data_t v = curr - prev;

	if ((idata_t)v < 0      /* curr < prev - counter overflow? */
	 && prev <= 0xffffffff  /* kernel uses 32bit value for the counter? */
	) {
		/* Add 33th bit set to 1 to curr, compensating for the overflow */
		/* double shift defeats "warning: left shift count >= width of type" */
		v += ((data_t)1 << 16) << 16;
	}
	return v;
}
#else
/* 32-bit data_t: modular subtraction already yields the correct delta
 * across a 32-bit counter wrap, no compensation needed */
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
	return curr - prev;
}
#endif
  138. static double percent_value(data_t prev, data_t curr, data_t itv)
  139. {
  140. return ((double)overflow_safe_sub(prev, curr)) / itv * 100;
  141. }
  142. static double hz_value(data_t prev, data_t curr, data_t itv)
  143. {
  144. //bb_error_msg("curr:%lld prev:%lld G.hz:%u", curr, prev, G.hz);
  145. return ((double)overflow_safe_sub(prev, curr)) / itv * G.hz;
  146. }
  147. static ALWAYS_INLINE data_t jiffies_diff(data_t old, data_t new)
  148. {
  149. data_t diff = new - old;
  150. return (diff == 0) ? 1 : diff;
  151. }
  152. static int is_cpu_in_bitmap(unsigned cpu)
  153. {
  154. return G.cpu_bitmap[cpu >> 3] & (1 << (cpu & 7));
  155. }
/*
 * Print per-CPU interrupt rates collected from /proc/interrupts or
 * /proc/softirqs. 'prev'/'current' select the two snapshot slots in
 * per_cpu_stats[]; 'prev_str'/'current_str' are timestamp strings for
 * the header and data rows. itv is the elapsed interval in jiffies.
 */
static void write_irqcpu_stats(struct stats_irqcpu *per_cpu_stats[],
		int total_irqs,
		data_t itv,
		int prev, int current,
		const char *prev_str, const char *current_str)
{
	int j;
	int offset, cpu;
	struct stats_irqcpu *p0, *q0;

	/* Check if number of IRQs has changed */
	/* NOTE(review): the result of this scan (loop index j at break) is
	 * never used below - it appears vestigial; kept from sysstat. Also
	 * note the loop runs to j == total_irqs inclusive. */
	if (G.interval != 0) {
		for (j = 0; j <= total_irqs; j++) {
			p0 = &per_cpu_stats[current][j];
			if (p0->irq_name[0] != '\0') {
				q0 = &per_cpu_stats[prev][j];
				if (strcmp(p0->irq_name, q0->irq_name) != 0) {
					/* Strings are different */
					break;
				}
			}
		}
	}

	/* Print header */
	printf("\n%-11s CPU", prev_str);
	{
		/* A bit complex code to "buy back" space if one header is too wide.
		 * Here's how it looks like. BLOCK_IOPOLL eats too much space,
		 * and latter headers use smaller width to compensate:
		 * ...BLOCK/s BLOCK_IOPOLL/s TASKLET/s SCHED/s HRTIMER/s RCU/s
		 * ... 2.32 0.00 0.01 17.58 0.14 141.96
		 */
		int expected_len = 0; /* width if every column were INTRATE_SCRWIDTH */
		int printed_len = 0;  /* width actually emitted so far */
		for (j = 0; j < total_irqs; j++) {
			p0 = &per_cpu_stats[current][j];
			if (p0->irq_name[0] != '\0') {
				/* shrink field if we previously overshot */
				int n = (INTRATE_SCRWIDTH-3) - (printed_len - expected_len);
				printed_len += printf(" %*s/s", n > 0 ? n : 0, skip_whitespace(p0->irq_name));
				expected_len += INTRATE_SCRWIDTH;
			}
		}
	}
	bb_putchar('\n');

	for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
		/* Check if we want stats about this CPU */
		if (!is_cpu_in_bitmap(cpu) && G.p_option) {
			continue;
		}
		printf("%-11s %4u", current_str, cpu - 1);
		for (j = 0; j < total_irqs; j++) {
			/* IRQ field set only for proc 0 */
			p0 = &per_cpu_stats[current][j];
			/*
			 * An empty string for irq name means that
			 * interrupt is no longer used.
			 */
			if (p0->irq_name[0] != '\0') {
				offset = j;
				q0 = &per_cpu_stats[prev][offset];
				/*
				 * If we want stats for the time since boot
				 * we have p0->irq != q0->irq.
				 */
				if (strcmp(p0->irq_name, q0->irq_name) != 0
				 && G.interval != 0
				) {
					/* Name mismatch: IRQ list shifted between snapshots;
					 * try the immediate neighbors in the prev snapshot */
					if (j) {
						offset = j - 1;
						q0 = &per_cpu_stats[prev][offset];
					}
					if (strcmp(p0->irq_name, q0->irq_name) != 0
					 && (j + 1 < total_irqs)
					) {
						offset = j + 1;
						q0 = &per_cpu_stats[prev][offset];
					}
				}
				if (strcmp(p0->irq_name, q0->irq_name) == 0
				 || G.interval == 0
				) {
					struct stats_irqcpu *p, *q;
					/* per-CPU cells live at [(cpu-1) * total_irqs + irq] */
					p = &per_cpu_stats[current][(cpu - 1) * total_irqs + j];
					q = &per_cpu_stats[prev][(cpu - 1) * total_irqs + offset];
					printf("%"INTRATE_SCRWIDTH_STR".2f",
						(double)(p->interrupt - q->interrupt) / itv * G.hz);
				} else {
					/* No matching prev counter: rate is unknowable */
					printf(" N/A");
				}
			}
		}
		bb_putchar('\n');
	}
}
  249. static data_t get_per_cpu_interval(const struct stats_cpu *scc,
  250. const struct stats_cpu *scp)
  251. {
  252. return ((scc->cpu_user + scc->cpu_nice +
  253. scc->cpu_system + scc->cpu_iowait +
  254. scc->cpu_idle + scc->cpu_steal +
  255. scc->cpu_irq + scc->cpu_softirq) -
  256. (scp->cpu_user + scp->cpu_nice +
  257. scp->cpu_system + scp->cpu_iowait +
  258. scp->cpu_idle + scp->cpu_steal +
  259. scp->cpu_irq + scp->cpu_softirq));
  260. }
  261. static void print_stats_cpu_struct(const struct stats_cpu *p,
  262. const struct stats_cpu *c,
  263. data_t itv)
  264. {
  265. printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
  266. percent_value(p->cpu_user - p->cpu_guest,
  267. /**/ c->cpu_user - c->cpu_guest, itv),
  268. percent_value(p->cpu_nice , c->cpu_nice , itv),
  269. percent_value(p->cpu_system , c->cpu_system , itv),
  270. percent_value(p->cpu_iowait , c->cpu_iowait , itv),
  271. percent_value(p->cpu_irq , c->cpu_irq , itv),
  272. percent_value(p->cpu_softirq, c->cpu_softirq, itv),
  273. percent_value(p->cpu_steal , c->cpu_steal , itv),
  274. percent_value(p->cpu_guest , c->cpu_guest , itv),
  275. percent_value(p->cpu_idle , c->cpu_idle , itv)
  276. );
  277. }
/*
 * Emit one full report for the interval between snapshot slots 'prev'
 * and 'current': CPU utilization rows (D_CPU), total intr/s (D_IRQ_SUM)
 * and per-IRQ tables (D_IRQ_CPU / D_SOFTIRQS), depending on G.options.
 */
static void write_stats_core(int prev, int current,
		const char *prev_str, const char *current_str)
{
	struct stats_cpu *scc, *scp;
	data_t itv, global_itv;
	int cpu;

	/* Compute time interval */
	itv = global_itv = jiffies_diff(G.global_uptime[prev], G.global_uptime[current]);

	/* Reduce interval to one CPU */
	if (G.cpu_nr > 1)
		itv = jiffies_diff(G.per_cpu_uptime[prev], G.per_cpu_uptime[current]);

	/* Print CPU stats */
	if (display_opt(D_CPU)) {
		///* This is done exactly once */
		//if (!G.header_done) {
		printf("\n%-11s CPU %%usr %%nice %%sys %%iowait %%irq %%soft %%steal %%guest %%idle\n",
			prev_str
		);
		//	G.header_done = 1;
		//}
		/* cpu == 0 is the aggregate "all" row, cpu N is CPU N-1 */
		for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
			data_t per_cpu_itv;

			/* Print stats about this particular CPU? */
			if (!is_cpu_in_bitmap(cpu))
				continue;

			scc = &G.st_cpu[current][cpu];
			scp = &G.st_cpu[prev][cpu];

			per_cpu_itv = global_itv;
			printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
			if (cpu) {
				double idle;
				/*
				 * If the CPU is offline, then it isn't in /proc/stat,
				 * so all values are 0.
				 * NB: Guest time is already included in user time.
				 */
				if ((scc->cpu_user | scc->cpu_nice | scc->cpu_system |
				     scc->cpu_iowait | scc->cpu_idle | scc->cpu_steal |
				     scc->cpu_irq | scc->cpu_softirq) == 0
				) {
					/*
					 * Set current struct fields to values from prev.
					 * iteration. Then their values won't jump from
					 * zero, when the CPU comes back online.
					 */
					*scc = *scp;
					idle = 0.0;
					goto print_zeros;
				}
				/* Compute interval again for current proc */
				per_cpu_itv = get_per_cpu_interval(scc, scp);
				if (per_cpu_itv == 0) {
					/*
					 * If the CPU is tickless then there is no change in CPU values
					 * but the sum of values is not zero.
					 */
					idle = 100.0;
 print_zeros:
					printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
						0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, idle);
					continue;
				}
			}
			print_stats_cpu_struct(scp, scc, per_cpu_itv);
		}
	}

	/* Print total number of IRQs per CPU */
	if (display_opt(D_IRQ_SUM)) {
		///* Print average header, this is done exactly once */
		//if (!G.avg_header_done) {
		printf("\n%-11s CPU intr/s\n", prev_str);
		//	G.avg_header_done = 1;
		//}
		for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
			data_t per_cpu_itv;

			/* Print stats about this CPU? */
			if (!is_cpu_in_bitmap(cpu))
				continue;

			per_cpu_itv = itv;
			printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
			if (cpu) {
				scc = &G.st_cpu[current][cpu];
				scp = &G.st_cpu[prev][cpu];
				/* Compute interval again for current proc */
				per_cpu_itv = get_per_cpu_interval(scc, scp);
				if (per_cpu_itv == 0) {
					printf(" %9.2f\n", 0.0);
					continue;
				}
			}
			//bb_error_msg("G.st_irq[%u][%u].irq_nr:%lld - G.st_irq[%u][%u].irq_nr:%lld",
			// current, cpu, G.st_irq[prev][cpu].irq_nr, prev, cpu, G.st_irq[current][cpu].irq_nr);
			printf(" %9.2f\n", hz_value(G.st_irq[prev][cpu].irq_nr, G.st_irq[current][cpu].irq_nr, per_cpu_itv));
		}
	}

	if (display_opt(D_IRQ_CPU)) {
		write_irqcpu_stats(G.st_irqcpu, G.irqcpu_nr,
			itv,
			prev, current,
			prev_str, current_str
		);
	}

	if (display_opt(D_SOFTIRQS)) {
		write_irqcpu_stats(G.st_softirqcpu, G.softirqcpu_nr,
			itv,
			prev, current,
			prev_str, current_str
		);
	}
}
  388. /*
  389. * Print the statistics
  390. */
  391. static void write_stats(int current)
  392. {
  393. char prev_time[16];
  394. char curr_time[16];
  395. strftime(prev_time, sizeof(prev_time), "%X", &G.timestamp[!current]);
  396. strftime(curr_time, sizeof(curr_time), "%X", &G.timestamp[current]);
  397. write_stats_core(!current, current, prev_time, curr_time);
  398. }
  399. static void write_stats_avg(int current)
  400. {
  401. write_stats_core(2, current, "Average:", "Average:");
  402. }
/*
 * Read CPU statistics from /proc/stat.
 * cpu[0] receives the aggregate "cpu " line, cpu[N+1] receives "cpuN".
 * *up is set to the aggregate uptime in jiffies. *up0 is set to CPU0's
 * uptime only if it was already nonzero on entry (the caller primes it
 * from /proc/uptime first - see main_loop).
 */
static void get_cpu_statistics(struct stats_cpu *cpu, data_t *up, data_t *up0)
{
	FILE *fp;
	char buf[1024];

	fp = xfopen_for_read(PROCFS_STAT);

	while (fgets(buf, sizeof(buf), fp)) {
		data_t sum;
		unsigned cpu_number;
		struct stats_cpu *cp;

		if (!starts_with_cpu(buf))
			continue; /* not "cpu" */

		cp = cpu; /* for "cpu " case */
		if (buf[3] != ' ') {
			/* "cpuN " - skip out-of-range or malformed CPU numbers */
			if (G.cpu_nr == 0
			 || sscanf(buf + 3, "%u ", &cpu_number) != 1
			 || cpu_number >= G.cpu_nr
			) {
				continue;
			}
			cp = &cpu[cpu_number + 1];
		}

		/* Read the counters, save them */
		/* Not all fields have to be present */
		memset(cp, 0, sizeof(*cp));
		sscanf(buf, "%*s"
			" %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
			" %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
			" %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u",
			&cp->cpu_user, &cp->cpu_nice, &cp->cpu_system,
			&cp->cpu_idle, &cp->cpu_iowait, &cp->cpu_irq,
			&cp->cpu_softirq, &cp->cpu_steal, &cp->cpu_guest
		);
		/*
		 * Compute uptime in jiffies (1/HZ), it'll be the sum of
		 * individual CPU's uptimes.
		 * NB: We have to omit cpu_guest, because cpu_user includes it.
		 */
		sum = cp->cpu_user + cp->cpu_nice + cp->cpu_system +
			cp->cpu_idle + cp->cpu_iowait + cp->cpu_irq +
			cp->cpu_softirq + cp->cpu_steal;
		if (buf[3] == ' ') {
			/* "cpu " */
			*up = sum;
		} else {
			/* "cpuN " */
			if (cpu_number == 0 && *up0 != 0) {
				/* Compute uptime of single CPU */
				*up0 = sum;
			}
		}
	}
	fclose(fp);
}
  460. /*
  461. * Read IRQs from /proc/stat
  462. */
  463. static void get_irqs_from_stat(struct stats_irq *irq)
  464. {
  465. FILE *fp;
  466. char buf[1024];
  467. fp = fopen_for_read(PROCFS_STAT);
  468. if (!fp)
  469. return;
  470. while (fgets(buf, sizeof(buf), fp)) {
  471. //bb_error_msg("/proc/stat:'%s'", buf);
  472. if (strncmp(buf, "intr ", 5) == 0) {
  473. /* Read total number of IRQs since system boot */
  474. sscanf(buf + 5, "%"FMT_DATA"u", &irq->irq_nr);
  475. }
  476. }
  477. fclose(fp);
  478. }
/*
 * Read stats from /proc/interrupts or /proc/softirqs into snapshot slot
 * 'current' of per_cpu_stats[]. IRQ names are stored in rows [0..irq),
 * per-CPU counters at [cpu * irqs_per_cpu + irq]. Counts of numerically
 * named IRQs are also accumulated into G.st_irq[current][cpu+1].irq_nr.
 */
static void get_irqs_from_interrupts(const char *fname,
		struct stats_irqcpu *per_cpu_stats[],
		int irqs_per_cpu, int current)
{
	FILE *fp;
	struct stats_irq *irq_i;
	struct stats_irqcpu *ic;
	char *buf;
	unsigned buflen;
	unsigned cpu;
	unsigned irq;
	int cpu_index[G.cpu_nr]; /* VLA: maps header column -> CPU number */
	int iindex;

	// Moved to caller.
	// Otherwise reading of /proc/softirqs
	// was resetting counts to 0 after we painstakingly collected them from
	// /proc/interrupts. Which resulted in:
	// 01:32:47 PM CPU intr/s
	// 01:32:47 PM all 591.47
	// 01:32:47 PM 0 0.00 <= ???
	// 01:32:47 PM 1 0.00 <= ???
	// for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
	// G.st_irq[current][cpu].irq_nr = 0;
	// //bb_error_msg("G.st_irq[%u][%u].irq_nr=0", current, cpu);
	// }

	fp = fopen_for_read(fname);
	if (!fp)
		return;

	buflen = INTERRUPTS_LINE + 16 * G.cpu_nr;
	buf = xmalloc(buflen);

	/* Parse header and determine, which CPUs are online */
	iindex = 0;
	while (fgets(buf, buflen, fp)) {
		char *cp, *next;
		next = buf;
		while ((cp = strstr(next, "CPU")) != NULL
		 && iindex < G.cpu_nr
		) {
			cpu = strtoul(cp + 3, &next, 10);
			cpu_index[iindex++] = cpu;
		}
		if (iindex) /* We found header */
			break;
	}

	irq = 0;
	while (fgets(buf, buflen, fp)
	 && irq < irqs_per_cpu
	) {
		int len;
		char last_char;
		char *cp;

		/* Skip over "IRQNAME:" */
		cp = strchr(buf, ':');
		if (!cp)
			continue;
		/* Last char of the name tells whether the IRQ is numeric */
		last_char = cp[-1];

		ic = &per_cpu_stats[current][irq];
		len = cp - buf;
		if (len >= sizeof(ic->irq_name)) {
			len = sizeof(ic->irq_name) - 1; /* truncate long names */
		}
		safe_strncpy(ic->irq_name, buf, len + 1);
		//bb_error_msg("%s: irq%d:'%s' buf:'%s'", fname, irq, ic->irq_name, buf);
		cp++;

		for (cpu = 0; cpu < iindex; cpu++) {
			char *next;
			/* offline CPUs have no column; index via cpu_index[] */
			ic = &per_cpu_stats[current][cpu_index[cpu] * irqs_per_cpu + irq];
			irq_i = &G.st_irq[current][cpu_index[cpu] + 1];
			ic->interrupt = strtoul(cp, &next, 10);
			/* Count only numerical IRQs */
			if (isdigit(last_char)) {
				irq_i->irq_nr += ic->interrupt;
				//bb_error_msg("G.st_irq[%u][%u].irq_nr + %u = %lld",
				// current, cpu_index[cpu] + 1, ic->interrupt, irq_i->irq_nr);
			}
			cp = next;
		}
		irq++;
	}
	fclose(fp);
	free(buf);

	while (irq < irqs_per_cpu) {
		/* Number of interrupts per CPU has changed */
		ic = &per_cpu_stats[current][irq];
		ic->irq_name[0] = '\0'; /* False interrupt */
		irq++;
	}
}
  570. static void get_uptime(data_t *uptime)
  571. {
  572. FILE *fp;
  573. char buf[sizeof(long)*3 * 2 + 4]; /* enough for long.long */
  574. unsigned long uptime_sec, decimal;
  575. fp = fopen_for_read(PROCFS_UPTIME);
  576. if (!fp)
  577. return;
  578. if (fgets(buf, sizeof(buf), fp)) {
  579. if (sscanf(buf, "%lu.%lu", &uptime_sec, &decimal) == 2) {
  580. *uptime = (data_t)uptime_sec * G.hz + decimal * G.hz / 100;
  581. }
  582. }
  583. fclose(fp);
  584. }
  585. static void get_localtime(struct tm *tm)
  586. {
  587. time_t timer;
  588. time(&timer);
  589. localtime_r(&timer, tm);
  590. }
/*
 * SIGALRM handler: re-install itself (SysV signal() semantics may reset
 * the disposition to default after delivery) and arm the next tick in
 * G.interval seconds. Also called directly with sig=0 from main_loop
 * to start the timer cycle.
 */
static void alarm_handler(int sig UNUSED_PARAM)
{
	signal(SIGALRM, alarm_handler);
	alarm(G.interval);
}
/*
 * Main measuring loop. Takes the initial snapshot into slot 0; in
 * since-boot mode (interval == 0) prints it against a zeroed slot 1 and
 * returns. Otherwise copies slot 0 to slot 2 (the average baseline),
 * then alternates slots 0/1 each tick until G.count samples are done,
 * finishing with the "Average:" report.
 */
static void main_loop(void)
{
	unsigned current;
	unsigned cpus;

	/* Read the stats */
	if (G.cpu_nr > 1) {
		G.per_cpu_uptime[0] = 0;
		get_uptime(&G.per_cpu_uptime[0]);
	}
	get_cpu_statistics(G.st_cpu[0], &G.global_uptime[0], &G.per_cpu_uptime[0]);
	if (display_opt(D_IRQ_SUM))
		get_irqs_from_stat(G.st_irq[0]);
	if (display_opt(D_IRQ_SUM | D_IRQ_CPU))
		get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
				G.irqcpu_nr, 0);
	if (display_opt(D_SOFTIRQS))
		get_irqs_from_interrupts(PROCFS_SOFTIRQS, G.st_softirqcpu,
				G.softirqcpu_nr, 0);

	if (G.interval == 0) {
		/* Display since boot time: diff against an all-zero snapshot */
		cpus = G.cpu_nr + 1;
		G.timestamp[1] = G.timestamp[0];
		memset(G.st_cpu[1], 0, sizeof(G.st_cpu[1][0]) * cpus);
		memset(G.st_irq[1], 0, sizeof(G.st_irq[1][0]) * cpus);
		memset(G.st_irqcpu[1], 0, sizeof(G.st_irqcpu[1][0]) * cpus * G.irqcpu_nr);
		memset(G.st_softirqcpu[1], 0, sizeof(G.st_softirqcpu[1][0]) * cpus * G.softirqcpu_nr);
		write_stats(0);
		/* And we're done */
		return;
	}

	/* Set a handler for SIGALRM */
	alarm_handler(0);

	/* Save the stats we already have. We need them to compute the average */
	G.timestamp[2] = G.timestamp[0];
	G.global_uptime[2] = G.global_uptime[0];
	G.per_cpu_uptime[2] = G.per_cpu_uptime[0];
	cpus = G.cpu_nr + 1;
	memcpy(G.st_cpu[2], G.st_cpu[0], sizeof(G.st_cpu[0][0]) * cpus);
	memcpy(G.st_irq[2], G.st_irq[0], sizeof(G.st_irq[0][0]) * cpus);
	memcpy(G.st_irqcpu[2], G.st_irqcpu[0], sizeof(G.st_irqcpu[0][0]) * cpus * G.irqcpu_nr);
	if (display_opt(D_SOFTIRQS)) {
		memcpy(G.st_softirqcpu[2], G.st_softirqcpu[0],
			sizeof(G.st_softirqcpu[0][0]) * cpus * G.softirqcpu_nr);
	}

	current = 1;
	while (1) {
		/* Suspend until a signal is received (SIGALRM from alarm_handler) */
		pause();

		/* Set structures to 0 to distinguish off/online CPUs */
		memset(&G.st_cpu[current][/*cpu:*/ 1], 0, sizeof(G.st_cpu[0][0]) * G.cpu_nr);
		get_localtime(&G.timestamp[current]);

		/* Read stats */
		if (G.cpu_nr > 1) {
			G.per_cpu_uptime[current] = 0;
			get_uptime(&G.per_cpu_uptime[current]);
		}
		get_cpu_statistics(G.st_cpu[current], &G.global_uptime[current], &G.per_cpu_uptime[current]);
		if (display_opt(D_IRQ_SUM))
			get_irqs_from_stat(G.st_irq[current]);
		if (display_opt(D_IRQ_SUM | D_IRQ_CPU)) {
			int cpu;
			/* Reset here (not in get_irqs_from_interrupts) so that
			 * reading /proc/softirqs does not wipe the totals */
			for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
				G.st_irq[current][cpu].irq_nr = 0;
			}
			/* accumulates .irq_nr */
			get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
					G.irqcpu_nr, current);
		}
		if (display_opt(D_SOFTIRQS))
			get_irqs_from_interrupts(PROCFS_SOFTIRQS,
					G.st_softirqcpu,
					G.softirqcpu_nr, current);

		write_stats(current);

		if (G.count > 0) {
			if (--G.count == 0)
				break;
		}

		/* Swap previous/current snapshot slots */
		current ^= 1;
	}

	/* Print average statistics */
	write_stats_avg(current);
}
  678. /* Initialization */
  679. /* Get number of clock ticks per sec */
  680. static ALWAYS_INLINE unsigned get_hz(void)
  681. {
  682. return sysconf(_SC_CLK_TCK);
  683. }
  684. static void alloc_struct(int cpus)
  685. {
  686. int i;
  687. for (i = 0; i < 3; i++) {
  688. G.st_cpu[i] = xzalloc(sizeof(G.st_cpu[i][0]) * cpus);
  689. G.st_irq[i] = xzalloc(sizeof(G.st_irq[i][0]) * cpus);
  690. G.st_irqcpu[i] = xzalloc(sizeof(G.st_irqcpu[i][0]) * cpus * G.irqcpu_nr);
  691. G.st_softirqcpu[i] = xzalloc(sizeof(G.st_softirqcpu[i][0]) * cpus * G.softirqcpu_nr);
  692. }
  693. G.cpu_bitmap_len = (cpus >> 3) + 1;
  694. G.cpu_bitmap = xzalloc(G.cpu_bitmap_len);
  695. }
  696. static void print_header(struct tm *t)
  697. {
  698. char cur_date[16];
  699. struct utsname uts;
  700. /* Get system name, release number and hostname */
  701. uname(&uts);
  702. strftime(cur_date, sizeof(cur_date), "%x", t);
  703. printf("%s %s (%s)\t%s\t_%s_\t(%u CPU)\n",
  704. uts.sysname, uts.release, uts.nodename, cur_date, uts.machine, G.cpu_nr);
  705. }
  706. /*
  707. * Get number of processors in /proc/stat
  708. * Return value '0' means one CPU and non SMP kernel.
  709. * Otherwise N means N processor(s) and SMP kernel.
  710. */
  711. static int get_cpu_nr(void)
  712. {
  713. FILE *fp;
  714. char line[256];
  715. int proc_nr = -1;
  716. fp = xfopen_for_read(PROCFS_STAT);
  717. while (fgets(line, sizeof(line), fp)) {
  718. if (!starts_with_cpu(line)) {
  719. if (proc_nr >= 0)
  720. break; /* we are past "cpuN..." lines */
  721. continue;
  722. }
  723. if (line[3] != ' ') { /* "cpuN" */
  724. int num_proc;
  725. if (sscanf(line + 3, "%u", &num_proc) == 1
  726. && num_proc > proc_nr
  727. ) {
  728. proc_nr = num_proc;
  729. }
  730. }
  731. }
  732. fclose(fp);
  733. return proc_nr + 1;
  734. }
  735. /*
  736. * Get number of interrupts available per processor
  737. */
  738. static int get_irqcpu_nr(const char *f, int max_irqs)
  739. {
  740. FILE *fp;
  741. char *line;
  742. unsigned linelen;
  743. unsigned irq;
  744. fp = fopen_for_read(f);
  745. if (!fp) /* No interrupts file */
  746. return 0;
  747. linelen = INTERRUPTS_LINE + 16 * G.cpu_nr;
  748. line = xmalloc(linelen);
  749. irq = 0;
  750. while (fgets(line, linelen, fp)
  751. && irq < max_irqs
  752. ) {
  753. int p = strcspn(line, ":");
  754. if ((p > 0) && (p < 16))
  755. irq++;
  756. }
  757. fclose(fp);
  758. free(line);
  759. return irq;
  760. }
  761. //usage:#define mpstat_trivial_usage
  762. //usage: "[-A] [-I SUM|CPU|ALL|SCPU] [-u] [-P num|ALL] [INTERVAL [COUNT]]"
  763. //usage:#define mpstat_full_usage "\n\n"
  764. //usage: "Per-processor statistics\n"
  765. //usage: "\nOptions:"
  766. //usage: "\n -A Same as -I ALL -u -P ALL"
  767. //usage: "\n -I SUM|CPU|ALL|SCPU Report interrupt statistics"
  768. //usage: "\n -P num|ALL Processor to monitor"
  769. //usage: "\n -u Report CPU utilization"
int mpstat_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mpstat_main(int UNUSED_PARAM argc, char **argv)
{
	char *opt_irq_fmt;
	char *opt_set_cpu;
	int i, opt;
	enum {
		OPT_ALL = 1 << 0, /* -A */
		OPT_INTS = 1 << 1, /* -I */
		OPT_SETCPU = 1 << 2, /* -P */
		OPT_UTIL = 1 << 3, /* -u */
	};

	/* Dont buffer data if redirected to a pipe */
	setbuf(stdout, NULL);

	INIT_G();
	/* -1 = "no interval given yet"; normalized to 0 (since-boot) below */
	G.interval = -1;

	/* Get number of processors */
	G.cpu_nr = get_cpu_nr();

	/* Get number of clock ticks per sec */
	G.hz = get_hz();

	/* Calculate number of interrupts per processor */
	G.irqcpu_nr = get_irqcpu_nr(PROCFS_INTERRUPTS, NR_IRQS) + NR_IRQCPU_PREALLOC;

	/* Calculate number of soft interrupts per processor */
	G.softirqcpu_nr = get_irqcpu_nr(PROCFS_SOFTIRQS, NR_IRQS) + NR_IRQCPU_PREALLOC;

	/* Allocate space for structures. + 1 for global structure. */
	alloc_struct(G.cpu_nr + 1);

	/* Parse and process arguments */
	opt = getopt32(argv, "AI:P:u", &opt_irq_fmt, &opt_set_cpu);
	argv += optind;

	if (*argv) {
		/* Get interval */
		G.interval = xatoi_u(*argv);
		G.count = -1; /* no count given: run until interrupted */
		argv++;
		if (*argv) {
			/* Get count value */
			if (G.interval == 0)
				bb_show_usage();
			G.count = xatoi_u(*argv);
			//if (*++argv)
			// bb_show_usage();
		}
	}
	if (G.interval < 0)
		G.interval = 0;

	if (opt & OPT_ALL) {
		/* -A is equivalent to -I ALL -u -P ALL */
		G.p_option = 1;
		G.options |= D_CPU + D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS;
		/* Select every CPU */
		memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
	}

	if (opt & OPT_INTS) {
		/* Map -I argument to display bits; order matches the string list */
		static const char v[] = {
			D_IRQ_CPU, D_IRQ_SUM, D_SOFTIRQS,
			D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS
		};
		i = index_in_strings("CPU\0SUM\0SCPU\0ALL\0", opt_irq_fmt);
		if (i == -1)
			bb_show_usage();
		G.options |= v[i];
	}

	if ((opt & OPT_UTIL) /* -u? */
	 || G.options == 0 /* nothing? (use default then) */
	) {
		G.options |= D_CPU;
	}

	if (opt & OPT_SETCPU) {
		/* -P num[,num...]|ALL: set bits for the selected CPUs */
		char *t;
		G.p_option = 1;
		for (t = strtok(opt_set_cpu, ","); t; t = strtok(NULL, ",")) {
			if (strcmp(t, "ALL") == 0) {
				/* Select every CPU */
				memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
			} else {
				/* Get CPU number */
				unsigned n = xatoi_u(t);
				if (n >= G.cpu_nr)
					bb_error_msg_and_die("not that many processors");
				n++; /* bit 0 is the global line, CPU N is bit N+1 */
				G.cpu_bitmap[n >> 3] |= 1 << (n & 7);
			}
		}
	}

	if (!G.p_option)
		/* Display global stats */
		G.cpu_bitmap[0] = 1;

	/* Get time */
	get_localtime(&G.timestamp[0]);

	/* Display header */
	print_header(&G.timestamp[0]);

	/* The main loop */
	main_loop();

	if (ENABLE_FEATURE_CLEAN_UP) {
		/* Clean up */
		for (i = 0; i < 3; i++) {
			free(G.st_cpu[i]);
			free(G.st_irq[i]);
			free(G.st_irqcpu[i]);
			free(G.st_softirqcpu[i]);
		}
		free(G.cpu_bitmap);
		free(&G);
	}

	return EXIT_SUCCESS;
}