profiler.c 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615
  1. /*++
  2. Copyright (c) 2013 Minoca Corp.
  3. This file is licensed under the terms of the GNU General Public License
  4. version 3. Alternative licensing terms are available. Contact
  5. info@minocacorp.com for details. See the LICENSE file at the root of this
  6. project for complete licensing information.
  7. Module Name:
  8. profiler.c
  9. Abstract:
  10. This module implements system profiling support.
  11. Author:
  12. Chris Stevens 1-Jul-2013
  13. Environment:
  14. Kernel
  15. --*/
  16. //
  17. // ------------------------------------------------------------------- Includes
  18. //
  19. #include <minoca/kernel/kernel.h>
  20. #include <minoca/kernel/kdebug.h>
  21. #include "spp.h"
  22. //
  23. // ---------------------------------------------------------------- Definitions
  24. //
  25. #define SP_ALLOCATION_TAG 0x21217053 // '!!pS'
  26. //
  27. // Define the length of the scratch buffer within the profiler buffer.
  28. //
  29. #define SCRATCH_BUFFER_LENGTH 200
  30. //
  31. // Define the size of the profiler buffer.
  32. //
  33. #define PROFILER_BUFFER_LENGTH (128 * 1024)
  34. //
  35. // Define the period between memory statistics updates, in microseconds.
  36. //
  37. #define MEMORY_STATISTICS_TIMER_PERIOD (1000 * MICROSECONDS_PER_MILLISECOND)
  38. //
  39. // Define the number of buffers required to track memory profiling data.
  40. //
  41. #define MEMORY_BUFFER_COUNT 3
  42. //
  43. // Define the buffer size for a new process or thread.
  44. //
  45. #define PROFILER_PROCESS_INFORMATION_SIZE 1024
  46. #define PROFILER_THREAD_INFORMATION_SIZE 1024
  47. //
  48. // ------------------------------------------------------ Data Type Definitions
  49. //
/*++

Structure Description:

    This structure defines the system profiler's collection buffer.

Members:

    Buffer - Stores a byte array of profiler data ready to be consumed. This is
        a ring buffer.

    ProducerIndex - Stores the index into the buffer that the data producer
        will write to next.

    ConsumerIndex - Stores the index into the buffer that the consumer will
        read from next.

    ConsumerStopIndex - Stores the index into the buffer that the consumer will
        read up to before completing a round of consuming.

    Scratch - Supplies a byte array used as a temporary holding place for data,
        allowing data collection to be performed with sequential writes before
        copying the data to the ring buffer.

--*/

typedef struct _PROFILER_BUFFER {
    BYTE Buffer[PROFILER_BUFFER_LENGTH];
    ULONG ProducerIndex;
    ULONG ConsumerIndex;
    ULONG ConsumerStopIndex;
    BYTE Scratch[SCRATCH_BUFFER_LENGTH];
} PROFILER_BUFFER, *PPROFILER_BUFFER;
/*++

Structure Description:

    This structure defines a memory statistics collection buffer for the
    system profiler.

Members:

    Buffer - Stores a byte array of memory statistics data.

    BufferSize - Stores the size of the buffer, in bytes.

    ConsumerIndex - Stores the index into the buffer from which the consumer
        will read next.

--*/

typedef struct _MEMORY_BUFFER {
    BYTE *Buffer;
    ULONG BufferSize;
    ULONG ConsumerIndex;
} MEMORY_BUFFER, *PMEMORY_BUFFER;
/*++

Structure Description:

    This structure defines the system's memory profiling state.

Members:

    MemoryBuffers - Stores an array of memory statistics buffers that are
        rotated between the producer and the consumer.

    ConsumerActive - Stores a boolean indicating whether or not the consumer
        is active.

    ConsumerIndex - Stores the index of the memory buffer that was last
        consumed or is actively being consumed.

    ReadyIndex - Stores the index of the next buffer from which the consumer
        should read.

    ProducerIndex - Stores the index of the next buffer to which the producer
        should write.

    Timer - Stores a pointer to the timer that controls production.

    ThreadAlive - Stores a boolean indicating whether the thread is alive or
        not.

--*/

typedef struct _MEMORY_PROFILER {
    MEMORY_BUFFER MemoryBuffers[MEMORY_BUFFER_COUNT];
    BOOL ConsumerActive;
    ULONG ConsumerIndex;
    ULONG ReadyIndex;
    ULONG ProducerIndex;
    PKTIMER Timer;
    volatile BOOL ThreadAlive;
} MEMORY_PROFILER, *PMEMORY_PROFILER;
  112. //
  113. // ----------------------------------------------- Internal Function Prototypes
  114. //
//
// Ring buffer production and consumption helpers.
//

BOOL
SppWriteProfilerBuffer (
    PPROFILER_BUFFER ProfilerBuffer,
    BYTE *Data,
    ULONG Length
    );

BOOL
SppReadProfilerBuffer (
    PPROFILER_BUFFER ProfilerBuffer,
    BYTE *Data,
    PULONG DataLength
    );

ULONG
SppReadProfilerData (
    BYTE *Destination,
    BYTE *Source,
    ULONG ByteCount,
    PULONG BytesRemaining
    );

//
// Stack sampling setup and teardown.
//

KSTATUS
SppInitializeStackSampling (
    VOID
    );

VOID
SppDestroyStackSampling (
    ULONG Phase
    );

//
// Memory statistics setup, teardown, and the worker thread that produces the
// data.
//

KSTATUS
SppInitializeMemoryStatistics (
    VOID
    );

VOID
SppDestroyMemoryStatistics (
    ULONG Phase
    );

VOID
SppMemoryStatisticsThread (
    PVOID Parameter
    );

//
// Thread statistics setup, teardown, and collection hooks.
//

KSTATUS
SppInitializeThreadStatistics (
    VOID
    );

VOID
SppSendInitialProcesses (
    VOID
    );

KSTATUS
SppSendInitialThreads (
    PROCESS_ID ProcessId
    );

VOID
SppProcessNewProcess (
    PROCESS_ID ProcessId
    );

VOID
SppProcessNewThread (
    PROCESS_ID ProcessId,
    THREAD_ID ThreadId
    );

VOID
SppDestroyThreadStatistics (
    ULONG Phase
    );

VOID
SppCollectThreadStatistic (
    PKTHREAD Thread,
    PPROCESSOR_BLOCK Processor,
    SCHEDULER_REASON ScheduleOutReason
    );
  185. //
  186. // -------------------------------------------------------------------- Globals
  187. //
//
// Stores a value indicating whether or not profiling is enabled for system
// initialization. Can be set with PROFILER_TYPE_FLAG_* values.
//

ULONG SpEarlyEnabledFlags = 0x0;

//
// Stores a value indicating which types of profiling are enabled.
//

ULONG SpEnabledFlags;

//
// Stores a pointer to a queued lock protecting access to the profiling status
// variables.
//

PQUEUED_LOCK SpProfilingQueuedLock;

//
// Structures that store stack sampling data. The array holds one ring buffer
// per initialized processor, indexed by processor number.
//

PPROFILER_BUFFER *SpStackSamplingArray;
ULONG SpStackSamplingArraySize;

//
// Stores a pointer to a structure that tracks memory statistics profiling.
//

PMEMORY_PROFILER SpMemory;

//
// Structures that store thread statistics: one ring buffer per initialized
// processor, plus the hook routines invoked on scheduling and on process and
// thread creation.
//

PPROFILER_BUFFER *SpThreadStatisticsArray;
ULONG SpThreadStatisticsArraySize;
PSP_COLLECT_THREAD_STATISTIC SpCollectThreadStatisticRoutine;
PSP_PROCESS_NEW_PROCESS SpProcessNewProcessRoutine;
PSP_PROCESS_NEW_THREAD SpProcessNewThreadRoutine;
  219. //
  220. // ------------------------------------------------------------------ Functions
  221. //
VOID
SpProfilerInterrupt (
    PTRAP_FRAME TrapFrame
    )

/*++

Routine Description:

    This routine handles periodic profiler interrupts, collecting information
    about the system for analysis.

Arguments:

    TrapFrame - Supplies a pointer to the current trap frame. May be NULL on
        an interrupt replay, in which case no data is collected.

Return Value:

    None.

--*/

{
    PVOID *CallStack;
    ULONG CallStackSize;
    ULONG Processor;
    PVOID Scratch;
    KSTATUS Status;

    ASSERT(KeGetRunLevel() == RunLevelHigh);

    //
    // Immediately return if stack sampling is not enabled. It may have been
    // turned off while this interrupt was pending.
    //

    if ((SpEnabledFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) == 0) {
        return;
    }

    //
    // Do nothing on interrupt replay if the trap frame is NULL.
    //

    if (TrapFrame == NULL) {
        return;
    }

    //
    // Do not collect data on processors that have not been initialized for
    // profiling.
    //

    Processor = KeGetCurrentProcessorNumber();
    if (Processor >= SpStackSamplingArraySize) {
        return;
    }

    //
    // Collect the stack data from the trap frame. The first UINTN of the
    // scratch buffer is reserved for the sentinel word; the call stack
    // entries are gathered immediately after it.
    //

    Scratch = SpStackSamplingArray[Processor]->Scratch;
    CallStackSize = SCRATCH_BUFFER_LENGTH - sizeof(UINTN);
    CallStack = (PVOID *)(Scratch + sizeof(UINTN));
    Status = SppArchGetKernelStackData(TrapFrame, CallStack, &CallStackSize);
    if (!KSUCCESS(Status)) {
        return;
    }

    ASSERT(CallStackSize != 0);

    //
    // Stamp the record header: the sentinel is OR'd together with the total
    // record length (stack data plus the sentinel word itself).
    //

    CallStackSize += sizeof(UINTN);
    *((PUINTN)Scratch) = PROFILER_DATA_SENTINEL | CallStackSize;

    //
    // Write the data to the sampling buffer.
    //

    SppWriteProfilerBuffer(SpStackSamplingArray[Processor],
                           (BYTE *)Scratch,
                           CallStackSize);

    return;
}
VOID
SpSendProfilingData (
    VOID
    )

/*++

Routine Description:

    This routine sends profiling data to any listening consumer. It is called
    periodically on each processor during the clock interrupt.

Arguments:

    None.

Return Value:

    None.

--*/

{

    ASSERT(KeGetRunLevel() >= RunLevelClock);

    //
    // Call out to the current profiling consumer to have that component ask
    // for the data (here, the kernel debugger transport).
    //

    KdSendProfilingData();
    return;
}
KSTATUS
SpGetProfilerData (
    PPROFILER_NOTIFICATION ProfilerNotification,
    PULONG Flags
    )

/*++

Routine Description:

    This routine fills the provided profiler notification with profiling data.
    A profiler consumer should call this routine to obtain data to send over
    the wire. It is assumed here that consumers will serialize consumption.

Arguments:

    ProfilerNotification - Supplies a pointer to the profiler notification that
        is to be filled in with profiling data. On input, the header's data
        size holds the capacity of the data buffer; on output it holds the
        number of bytes actually written.

    Flags - Supplies a pointer to the types of profiling data the caller wants
        to collect. Upon return, the flags for the returned data will be
        returned.

Return Value:

    Status code.

--*/

{
    ULONG DataSize;
    PMEMORY_BUFFER MemoryBuffer;
    ULONG Processor;
    BOOL ReadMore;
    ULONG RemainingLength;

    ASSERT(Flags != NULL);
    ASSERT(*Flags != 0);

    //
    // Process the requested profiling data in a set order, removing each type
    // from the set of flags as it is processed. Only one type is handled per
    // call; the caller invokes this routine again for the remaining types.
    //

    if ((*Flags & PROFILER_TYPE_FLAG_STACK_SAMPLING) != 0) {
        Processor = KeGetCurrentProcessorNumber();

        ASSERT(Processor < SpStackSamplingArraySize);

        //
        // Fill the buffer with data from the current processor's stack
        // sampling data.
        //

        ReadMore = SppReadProfilerBuffer(
                                     SpStackSamplingArray[Processor],
                                     ProfilerNotification->Data,
                                     &ProfilerNotification->Header.DataSize);

        ProfilerNotification->Header.Type = ProfilerDataTypeStack;
        ProfilerNotification->Header.Processor = Processor;

        //
        // If no more data is available, it means that the consumer has read up
        // to the producer, or up to its stop point (the point the producer was
        // at when the consumer started).
        //

        if (ReadMore == FALSE) {
            *Flags &= ~PROFILER_TYPE_FLAG_STACK_SAMPLING;
        }

    } else if ((*Flags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) != 0) {

        //
        // If the consumer is not currently active, then get the next buffer to
        // consume, which is indicated by the ready index.
        //

        if (SpMemory->ConsumerActive == FALSE) {
            SpMemory->ConsumerIndex = SpMemory->ReadyIndex;
            SpMemory->ConsumerActive = TRUE;
        }

        //
        // Copy as much data as possible from the consumer buffer to the
        // profiler notification data buffer.
        //

        MemoryBuffer = &(SpMemory->MemoryBuffers[SpMemory->ConsumerIndex]);
        RemainingLength = MemoryBuffer->BufferSize -
                          MemoryBuffer->ConsumerIndex;

        if (RemainingLength < ProfilerNotification->Header.DataSize) {
            DataSize = RemainingLength;

        } else {
            DataSize = ProfilerNotification->Header.DataSize;
        }

        if (DataSize != 0) {
            RtlCopyMemory(ProfilerNotification->Data,
                          &(MemoryBuffer->Buffer[MemoryBuffer->ConsumerIndex]),
                          DataSize);
        }

        MemoryBuffer->ConsumerIndex += DataSize;
        ProfilerNotification->Header.Type = ProfilerDataTypeMemory;
        ProfilerNotification->Header.Processor = KeGetCurrentProcessorNumber();
        ProfilerNotification->Header.DataSize = DataSize;

        //
        // Mark the consumer inactive if all the data was consumed.
        //

        if (MemoryBuffer->ConsumerIndex == MemoryBuffer->BufferSize) {
            SpMemory->ConsumerActive = FALSE;
            *Flags &= ~PROFILER_TYPE_FLAG_MEMORY_STATISTICS;
        }

    } else if ((*Flags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) != 0) {
        Processor = KeGetCurrentProcessorNumber();

        ASSERT(Processor < SpThreadStatisticsArraySize);

        //
        // Fill the buffer with data from the current processor's thread
        // statistics data.
        //

        ReadMore = SppReadProfilerBuffer(
                                   SpThreadStatisticsArray[Processor],
                                   ProfilerNotification->Data,
                                   &(ProfilerNotification->Header.DataSize));

        ProfilerNotification->Header.Type = ProfilerDataTypeThread;
        ProfilerNotification->Header.Processor = Processor;

        //
        // If no more data is available, it means that the consumer has read up
        // to the producer, or up to its stop point (the point the producer was
        // at when the consumer started).
        //

        if (ReadMore == FALSE) {
            *Flags &= ~PROFILER_TYPE_FLAG_THREAD_STATISTICS;
        }
    }

    return STATUS_SUCCESS;
}
ULONG
SpGetProfilerDataStatus (
    VOID
    )

/*++

Routine Description:

    This routine determines if there is profiling data for the current
    processor that needs to be sent to a consumer.

Arguments:

    None.

Return Value:

    Returns a set of flags representing which types of profiling data are
    available. Returns zero if nothing is available.

--*/

{
    PPROFILER_BUFFER Buffer;
    ULONG Flags;
    ULONG Processor;

    ASSERT(KeGetRunLevel() >= RunLevelClock);

    if (SpEnabledFlags == 0) {
        return 0;
    }

    Flags = SpEnabledFlags;

    //
    // Determine if there is stack sampling data to send.
    //

    if ((Flags & PROFILER_TYPE_FLAG_STACK_SAMPLING) != 0) {

        //
        // If stack sampling is not yet initialized on this processor remove it
        // from the flags.
        //

        Processor = KeGetCurrentProcessorNumber();
        if (Processor >= SpStackSamplingArraySize) {
            Flags &= ~PROFILER_TYPE_FLAG_STACK_SAMPLING;

        //
        // Otherwise if the stack sampling buffer is empty, then remove it from
        // the flags.
        //
        // N.B. This access is safe because the stack sampling destruction code
        //      waits for at least one clock interrupt after disabling stack
        //      sampling before destroying the global array.
        //

        } else {
            Buffer = SpStackSamplingArray[Processor];
            if (Buffer->ProducerIndex == Buffer->ConsumerIndex) {
                Flags &= ~PROFILER_TYPE_FLAG_STACK_SAMPLING;
            }
        }
    }

    //
    // Determine if there are memory statistics to send.
    //

    if ((Flags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) != 0) {

        //
        // There is no new data if the consumer index still equals the ready
        // index or the producer index.
        //

        if ((SpMemory->ConsumerIndex == SpMemory->ReadyIndex) ||
            (SpMemory->ConsumerIndex == SpMemory->ProducerIndex)) {

            Flags &= ~PROFILER_TYPE_FLAG_MEMORY_STATISTICS;
        }
    }

    if ((Flags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) != 0) {

        //
        // If thread statistics are not yet initialized on this processor
        // remove them from the flags.
        //

        Processor = KeGetCurrentProcessorNumber();
        if (Processor >= SpThreadStatisticsArraySize) {
            Flags &= ~PROFILER_TYPE_FLAG_THREAD_STATISTICS;

        //
        // Otherwise if the thread statistics buffer is empty, then remove it
        // from the flags.
        //
        // N.B. This access is safe because the thread statistics destruction
        //      code waits for at least one clock interrupt after disabling
        //      profiling before destroying the global array.
        //

        } else {
            Buffer = SpThreadStatisticsArray[Processor];
            if (Buffer->ProducerIndex == Buffer->ConsumerIndex) {
                Flags &= ~PROFILER_TYPE_FLAG_THREAD_STATISTICS;
            }
        }
    }

    return Flags;
}
  506. KSTATUS
  507. SpInitializeProfiler (
  508. VOID
  509. )
  510. /*++
  511. Routine Description:
  512. This routine initializes system profiling at processor start-up. It
  513. extends the profiling infrastructure as each processor comes online. If
  514. early profiling is not enabled, this routine just exits.
  515. Arguments:
  516. None.
  517. Return Value:
  518. Status code.
  519. --*/
  520. {
  521. KSTATUS Status;
  522. ASSERT(KeGetRunLevel() <= RunLevelDispatch);
  523. ASSERT(KeGetCurrentProcessorNumber() == 0);
  524. //
  525. // Always initialize the profiling lock in case profiling gets enabled
  526. // later on.
  527. //
  528. SpProfilingQueuedLock = KeCreateQueuedLock();
  529. if (SpProfilingQueuedLock == NULL) {
  530. Status = STATUS_INSUFFICIENT_RESOURCES;
  531. goto InitializeProfilerEnd;
  532. }
  533. //
  534. // Do nothing more if early profiling is not enabled for any profiling
  535. // types.
  536. //
  537. if (SpEarlyEnabledFlags != 0) {
  538. KeAcquireQueuedLock(SpProfilingQueuedLock);
  539. Status = SppStartSystemProfiler(SpEarlyEnabledFlags);
  540. KeReleaseQueuedLock(SpProfilingQueuedLock);
  541. if (!KSUCCESS(Status)) {
  542. goto InitializeProfilerEnd;
  543. }
  544. }
  545. Status = STATUS_SUCCESS;
  546. InitializeProfilerEnd:
  547. return Status;
  548. }
  549. KSTATUS
  550. SppStartSystemProfiler (
  551. ULONG Flags
  552. )
  553. /*++
  554. Routine Description:
  555. This routine starts the system profiler. This routine must be called at low
  556. level. It assumes the profiler queued lock is held.
  557. Arguments:
  558. Flags - Supplies a set of flags representing the types of profiling that
  559. should be started.
  560. Return Value:
  561. Status code.
  562. --*/
  563. {
  564. ULONG InitializedFlags;
  565. ULONG NewFlags;
  566. KSTATUS Status;
  567. ASSERT(KeGetRunLevel() == RunLevelLow);
  568. ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
  569. //
  570. // The caller must specify flags.
  571. //
  572. if (Flags == 0) {
  573. return STATUS_INVALID_PARAMETER;
  574. }
  575. InitializedFlags = 0;
  576. //
  577. // Determine what new profiling types need to be started.
  578. //
  579. NewFlags = Flags & ~SpEnabledFlags;
  580. //
  581. // If all the desired profiling types are already active, then just exit.
  582. //
  583. if (NewFlags == 0) {
  584. Status = STATUS_SUCCESS;
  585. goto StartSystemProfilerEnd;
  586. }
  587. //
  588. // Initialize the system profiler for each of the new types.
  589. //
  590. if ((NewFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) != 0) {
  591. Status = SppInitializeStackSampling();
  592. if (!KSUCCESS(Status)) {
  593. goto StartSystemProfilerEnd;
  594. }
  595. InitializedFlags |= PROFILER_TYPE_FLAG_STACK_SAMPLING;
  596. }
  597. if ((NewFlags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) != 0) {
  598. Status = SppInitializeMemoryStatistics();
  599. if (!KSUCCESS(Status)) {
  600. goto StartSystemProfilerEnd;
  601. }
  602. InitializedFlags |= PROFILER_TYPE_FLAG_MEMORY_STATISTICS;
  603. }
  604. if ((NewFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) != 0) {
  605. Status = SppInitializeThreadStatistics();
  606. if (!KSUCCESS(Status)) {
  607. goto StartSystemProfilerEnd;
  608. }
  609. InitializedFlags |= PROFILER_TYPE_FLAG_THREAD_STATISTICS;
  610. }
  611. KeUpdateClockForProfiling(TRUE);
  612. Status = STATUS_SUCCESS;
  613. StartSystemProfilerEnd:
  614. if (!KSUCCESS(Status)) {
  615. if (InitializedFlags != 0) {
  616. SppStopSystemProfiler(InitializedFlags);
  617. }
  618. }
  619. return Status;
  620. }
KSTATUS
SppStopSystemProfiler (
    ULONG Flags
    )

/*++

Routine Description:

    This routine stops the system profiler and destroys the profiling data
    structures. This routine must be called at low level. It assumes the
    profiler queued lock is held.

Arguments:

    Flags - Supplies a set of flags representing the types of profiling that
        should be stopped.

Return Value:

    Status code.

--*/

{

    BOOL DelayRequired;
    ULONG DisableFlags;
    ULONG Index;
    ULONG *InterruptCounts;
    RUNLEVEL OldRunLevel;
    ULONG ProcessorCount;
    PROCESSOR_SET Processors;
    KSTATUS Status;

    ASSERT(KeGetRunLevel() == RunLevelLow);
    ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);

    //
    // The caller must specify flags.
    //

    if (Flags == 0) {
        return STATUS_INVALID_PARAMETER;
    }

    //
    // Determine which of the requested profiling types are actually enabled;
    // only those need to be stopped.
    //

    DisableFlags = Flags & SpEnabledFlags;

    //
    // If profiling is already disabled for the requested profiling types, then
    // just exit.
    //

    if (DisableFlags == 0) {
        Status = STATUS_SUCCESS;
        goto StopSystemProfilerEnd;
    }

    //
    // Phase 0 destroy stops the system profiler for each type that needs to
    // be stopped. After phase 0 the producers generate no new data, but their
    // buffers have not yet been released.
    //

    if ((DisableFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) != 0) {
        SppDestroyStackSampling(0);
    }

    if ((DisableFlags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) != 0) {
        SppDestroyMemoryStatistics(0);
    }

    if ((DisableFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) != 0) {
        SppDestroyThreadStatistics(0);
    }

    //
    // Once phase zero destruction is complete, each profiler has stopped
    // producing data immediately, but another core may be in the middle of
    // consuming profiling data during its clock interrupt. Wait until each
    // processor has received the notice that profiling is now disabled and
    // then destroy each profiler's data structures. This is guaranteed after
    // the clock interrupt has incremented once. If an array cannot be
    // allocated for the processor counts, then just yield for a bit. It is
    // not good enough to just send an IPI-level interrupt to each core. This
    // may land on top of a clock interrupt in the middle of checking to see
    // if there is pending profiling data, which is not done with interrupts
    // disabled (i.e. the IPI-level interrupt running doesn't indicate the
    // other core is done with the data). As a result, this routine could run
    // through and release the buffers being observed by the other core.
    //

    ProcessorCount = KeGetActiveProcessorCount();
    if (ProcessorCount > 1) {
        DelayRequired = TRUE;
        InterruptCounts = MmAllocateNonPagedPool(ProcessorCount * sizeof(ULONG),
                                                 SP_ALLOCATION_TAG);

        if (InterruptCounts != NULL) {

            //
            // Snapshot each processor's clock interrupt count so the wait
            // below can detect that every core has taken at least one more
            // clock interrupt since profiling was disabled.
            //

            for (Index = 0; Index < ProcessorCount; Index += 1) {
                InterruptCounts[Index] = KeGetClockInterruptCount(Index);
            }

            //
            // As some cores may have gone idle, send a clock IPI out to all of
            // them to make sure the interrupt count gets incremented.
            //

            Processors.Target = ProcessorTargetAll;
            OldRunLevel = KeRaiseRunLevel(RunLevelDispatch);
            Status = HlSendIpi(IpiTypeClock, &Processors);
            KeLowerRunLevel(OldRunLevel);
            if (KSUCCESS(Status)) {
                for (Index = 0; Index < ProcessorCount; Index += 1) {
                    while (KeGetClockInterruptCount(Index) <=
                           InterruptCounts[Index]) {

                        KeYield();
                    }
                }

                DelayRequired = FALSE;
            }

            MmFreeNonPagedPool(InterruptCounts);
        }

        //
        // If the allocation or IPI failed above, wait a conservative second to
        // make sure all the cores are done consuming the profiler data.
        //

        if (DelayRequired != FALSE) {
            KeDelayExecution(FALSE, FALSE, MICROSECONDS_PER_SECOND);
        }
    }

    //
    // Phase 1 destroy releases any resources for each type of profiling that
    // was stopped in phase 0.
    //

    if ((DisableFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) != 0) {
        SppDestroyStackSampling(1);
    }

    if ((DisableFlags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) != 0) {
        SppDestroyMemoryStatistics(1);
    }

    if ((DisableFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) != 0) {
        SppDestroyThreadStatistics(1);
    }

    //
    // If no profiling types remain enabled, tell the clock it no longer needs
    // to run in profiling mode.
    //

    if (SpEnabledFlags == 0) {
        KeUpdateClockForProfiling(FALSE);
    }

    Status = STATUS_SUCCESS;

StopSystemProfilerEnd:
    return Status;
}
  749. //
  750. // --------------------------------------------------------- Internal Functions
  751. //
  752. BOOL
  753. SppWriteProfilerBuffer (
  754. PPROFILER_BUFFER ProfilerBuffer,
  755. BYTE *Data,
  756. ULONG DataLength
  757. )
  758. /*++
  759. Routine Description:
  760. This routine writes the supplied data to the profiler data buffer. If there
  761. is not enough room in the buffer, it just exits.
  762. Arguments:
  763. ProfilerBuffer - Supplies a pointer to a profiler data buffer to which the
  764. given data should be written.
  765. Data - Supplies an array of bytes to write to the profiler data buffer.
  766. DataLength - Supplies the length of the data array, in bytes.
  767. Return Value:
  768. TRUE if the data was successfully added to the buffer.
  769. FALSE if the data was dropped.
  770. --*/
  771. {
  772. ULONG AvailableLength;
  773. ULONG BufferIndex;
  774. ULONG ConsumerIndex;
  775. ULONG DataIndex;
  776. ULONG FirstWriteLength;
  777. ULONG ProducerIndex;
  778. BOOL Result;
  779. ULONG SecondWriteLength;
  780. ULONG WriteLength;
  781. ConsumerIndex = ProfilerBuffer->ConsumerIndex;
  782. ProducerIndex = ProfilerBuffer->ProducerIndex;
  783. //
  784. // If the producer's and consumer's indices are equal, then the buffer is
  785. // empty. Allow the producer to write up until the end of the buffer, being
  786. // careful to never completely fill the buffer to differentiate between an
  787. // empty buffer and a full buffer.
  788. //
  789. if (ProducerIndex == ConsumerIndex) {
  790. AvailableLength = PROFILER_BUFFER_LENGTH - 1;
  791. //
  792. // If the producer's index is greater than the consumer's, then two writes
  793. // may be necessary to fill the buffer. Account for wrapping when
  794. // calculating the available length.
  795. //
  796. } else if (ProducerIndex > ConsumerIndex) {
  797. AvailableLength = PROFILER_BUFFER_LENGTH - ProducerIndex;
  798. AvailableLength += (ConsumerIndex - 1);
  799. //
  800. // If the producer's index is less than the consumer's, then allow the
  801. // producer to write up until 1 less than the consumer's index.
  802. //
  803. } else {
  804. ASSERT(ProducerIndex < ConsumerIndex);
  805. AvailableLength = (ConsumerIndex - 1) - ProducerIndex;
  806. }
  807. //
  808. // If the available length is not enough for the data, exit.
  809. //
  810. if (AvailableLength < DataLength) {
  811. Result = FALSE;
  812. goto WriteProfilerBufferEnd;
  813. }
  814. //
  815. // Determine if the write needs to be broken into two operations.
  816. //
  817. if ((ProducerIndex + DataLength) > PROFILER_BUFFER_LENGTH) {
  818. FirstWriteLength = PROFILER_BUFFER_LENGTH - ProducerIndex;
  819. ASSERT(FirstWriteLength <= DataLength);
  820. SecondWriteLength = DataLength - FirstWriteLength;
  821. } else {
  822. FirstWriteLength = DataLength;
  823. SecondWriteLength = 0;
  824. }
  825. //
  826. // Write the data to the buffer.
  827. //
  828. DataIndex = 0;
  829. BufferIndex = ProducerIndex;
  830. WriteLength = FirstWriteLength;
  831. RtlCopyMemory(&(ProfilerBuffer->Buffer[BufferIndex]),
  832. &(Data[DataIndex]),
  833. WriteLength);
  834. if (SecondWriteLength != 0) {
  835. DataIndex = WriteLength;
  836. BufferIndex = 0;
  837. WriteLength = SecondWriteLength;
  838. RtlCopyMemory(&(ProfilerBuffer->Buffer[BufferIndex]),
  839. &(Data[DataIndex]),
  840. WriteLength);
  841. }
  842. //
  843. // Update the producer index.
  844. //
  845. ProducerIndex = BufferIndex + WriteLength;
  846. if (ProducerIndex == PROFILER_BUFFER_LENGTH) {
  847. ProfilerBuffer->ProducerIndex = 0;
  848. } else {
  849. ProfilerBuffer->ProducerIndex = ProducerIndex;
  850. }
  851. Result = TRUE;
  852. WriteProfilerBufferEnd:
  853. return Result;
  854. }
BOOL
SppReadProfilerBuffer (
    PPROFILER_BUFFER ProfilerBuffer,
    BYTE *Data,
    PULONG DataLength
    )

/*++

Routine Description:

    This routine reads up to the provided data length of bytes from the given
    profiler buffer. Upon return, the data length is modified to reflect the
    total number of bytes read. If there are no new bytes to read from the
    buffer, then a data length of zero is returned.

Arguments:

    ProfilerBuffer - Supplies a pointer to a profiler data buffer from which
        up to data length bytes will be read.

    Data - Supplies an array of bytes that is to receive data from the
        profiler buffer.

    DataLength - Supplies the maximum number of bytes that can be read into
        the data buffer. Receives the total bytes read upon return.

Return Value:

    Returns TRUE if there is more data to be read, or FALSE otherwise.

--*/

{

    ULONG AvailableLength;
    ULONG BytesRead;
    ULONG ConsumerIndex;
    ULONG ConsumerStopIndex;
    ULONG FirstReadLength;
    BOOL MoreDataAvailable;
    ULONG ProducerIndex;
    ULONG RemainingLength;
    ULONG SecondReadLength;
    ULONG TotalReadLength;

    ASSERT(ProfilerBuffer != NULL);
    ASSERT(Data != NULL);
    ASSERT(DataLength != NULL);

    ConsumerIndex = ProfilerBuffer->ConsumerIndex;
    ProducerIndex = ProfilerBuffer->ProducerIndex;
    ConsumerStopIndex = ProfilerBuffer->ConsumerStopIndex;
    SecondReadLength = 0;
    AvailableLength = *DataLength;
    *DataLength = 0;
    MoreDataAvailable = FALSE;

    //
    // If the stop index equals the consumer index, then advance it to
    // the producer index in order to gather all of the currently available
    // data. Do this so that the consumer will eventually complete when faced
    // with a speedy producer.
    //

    if (ConsumerIndex == ConsumerStopIndex) {
        ProfilerBuffer->ConsumerStopIndex = ProducerIndex;
    }

    //
    // If the producer's and consumer's indices are equal, then there are no
    // bytes to consume. The buffer is empty.
    //

    if (ProducerIndex == ConsumerIndex) {
        goto EmptyProfilerBufferEnd;

    //
    // If the producer is ahead of the consumer, then consume the buffer all
    // the way up to the producer's index or up to the provided buffer size.
    //

    } else if (ProducerIndex > ConsumerIndex) {
        FirstReadLength = ProducerIndex - ConsumerIndex;
        if (FirstReadLength > AvailableLength) {
            FirstReadLength = AvailableLength;
        }

    //
    // If the producer is behind the consumer, then two reads are required to
    // wrap around the circular buffer: one from the consumer index up to the
    // end of the buffer, and one from the start of the buffer up to the
    // producer index. Truncate based on the provided data length. The second
    // read is only attempted when the first read was not truncated.
    //

    } else {

        ASSERT(ProducerIndex < ConsumerIndex);

        FirstReadLength = PROFILER_BUFFER_LENGTH - ConsumerIndex;
        if (FirstReadLength > AvailableLength) {
            FirstReadLength = AvailableLength;

        } else {
            SecondReadLength = ProducerIndex;
            if ((FirstReadLength + SecondReadLength) > AvailableLength) {
                SecondReadLength = AvailableLength - FirstReadLength;
            }
        }
    }

    TotalReadLength = FirstReadLength + SecondReadLength;

    //
    // The provided data buffer should be large enough to fit the determined
    // reads.
    //

    ASSERT(AvailableLength >= TotalReadLength);

    //
    // Read the data out into the supplied buffer, making sure to read on the
    // profiler unit boundary, as marked by the sentinel. The helper may copy
    // fewer bytes than requested if the next data packet would not fit in the
    // remaining destination space; the leftover is reflected in the remaining
    // length.
    //

    RemainingLength = TotalReadLength;
    BytesRead = SppReadProfilerData(&(Data[0]),
                                    &(ProfilerBuffer->Buffer[ConsumerIndex]),
                                    FirstReadLength,
                                    &RemainingLength);

    //
    // Only perform the wrapped second read if the first read consumed its
    // entire region; otherwise the helper stopped at a packet boundary and
    // the wrapped portion waits for the next call.
    //

    if ((SecondReadLength != 0) && (BytesRead == FirstReadLength)) {

        ASSERT(RemainingLength == SecondReadLength);

        BytesRead = SppReadProfilerData(&(Data[FirstReadLength]),
                                        &(ProfilerBuffer->Buffer[0]),
                                        SecondReadLength,
                                        &RemainingLength);

        ASSERT(SecondReadLength == (BytesRead + RemainingLength));

        //
        // The consumer wrapped around, so its new index is simply the number
        // of bytes consumed from the beginning of the buffer.
        //

        ConsumerIndex = BytesRead;

    } else {
        ConsumerIndex += BytesRead;
    }

    //
    // Update the data length based on how much data was read.
    //

    *DataLength = TotalReadLength - RemainingLength;

    //
    // Update the consumer index, wrapping back to zero if it landed exactly
    // on the end of the buffer.
    //

    if (ConsumerIndex == PROFILER_BUFFER_LENGTH) {
        ProfilerBuffer->ConsumerIndex = 0;

    } else {
        ProfilerBuffer->ConsumerIndex = ConsumerIndex;
    }

    //
    // If the stop index has been reached with this read, let the caller know
    // that there is no more data to collect at this time.
    //

    if (ProfilerBuffer->ConsumerIndex != ProfilerBuffer->ConsumerStopIndex) {
        MoreDataAvailable = TRUE;
    }

EmptyProfilerBufferEnd:
    return MoreDataAvailable;
}
  987. ULONG
  988. SppReadProfilerData (
  989. BYTE *Destination,
  990. BYTE *Source,
  991. ULONG ByteCount,
  992. PULONG BytesRemaining
  993. )
  994. /*++
  995. Routine Description:
  996. This routine reads as many profiler data units as it can, up to the
  997. supplied byte count, making sure to never exceed the remaining available
  998. bytes.
  999. Arguments:
  1000. Destination - Supplies a pointer to the destination data buffer.
  1001. Source - Supplies a pointer to the source data buffer.
  1002. ByteCount - Supplies the maximum number of bytes that should be read out of
  1003. the source buffer.
  1004. BytesRemaining - Supplies a pointer to the maximum number of bytes that
  1005. can be read to the destination buffer. It is updated upon return.
  1006. Return Value:
  1007. Returns the number of bytes read by this routine.
  1008. --*/
  1009. {
  1010. ULONG BytesRead;
  1011. ULONG DestinationIndex;
  1012. ULONG SourceIndex;
  1013. ULONG Value;
  1014. BytesRead = 0;
  1015. DestinationIndex = 0;
  1016. for (SourceIndex = 0; SourceIndex < ByteCount; SourceIndex += 1) {
  1017. //
  1018. // If the current byte is the start of the sentinel, check the length
  1019. // of the next data packet and do not continue if it will not fit in
  1020. // the destination buffer.
  1021. //
  1022. Value = *((PULONG)&(Source[SourceIndex]));
  1023. if (IS_PROFILER_DATA_SENTINEL(Value) != FALSE) {
  1024. if (GET_PROFILER_DATA_SIZE(Value) > *BytesRemaining) {
  1025. break;
  1026. }
  1027. }
  1028. Destination[DestinationIndex] = Source[SourceIndex];
  1029. DestinationIndex += 1;
  1030. *BytesRemaining -= 1;
  1031. BytesRead += 1;
  1032. }
  1033. return BytesRead;
  1034. }
  1035. KSTATUS
  1036. SppInitializeStackSampling (
  1037. VOID
  1038. )
  1039. /*++
  1040. Routine Description:
  1041. This routine initializes the system's profiling data structures.
  1042. Arguments:
  1043. None.
  1044. Return Value:
  1045. Status code.
  1046. --*/
  1047. {
  1048. ULONG AllocationSize;
  1049. ULONG Index;
  1050. ULONG ProcessorCount;
  1051. PPROFILER_BUFFER ProfilerBuffer;
  1052. PPROFILER_BUFFER *StackSamplingArray;
  1053. KSTATUS Status;
  1054. ASSERT(KeGetRunLevel() == RunLevelLow);
  1055. ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) == 0);
  1056. ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
  1057. ASSERT(SpStackSamplingArray == NULL);
  1058. ASSERT(SpStackSamplingArraySize == 0);
  1059. ProcessorCount = KeGetActiveProcessorCount();
  1060. AllocationSize = ProcessorCount * sizeof(PPROFILER_BUFFER);
  1061. StackSamplingArray = MmAllocateNonPagedPool(AllocationSize,
  1062. SP_ALLOCATION_TAG);
  1063. if (StackSamplingArray == NULL) {
  1064. Status = STATUS_INSUFFICIENT_RESOURCES;
  1065. goto InitializeProfilerEnd;
  1066. }
  1067. //
  1068. // Now fill in the array with profiler buffers.
  1069. //
  1070. RtlZeroMemory(StackSamplingArray, AllocationSize);
  1071. for (Index = 0; Index < ProcessorCount; Index += 1) {
  1072. ProfilerBuffer = MmAllocateNonPagedPool(sizeof(PROFILER_BUFFER),
  1073. SP_ALLOCATION_TAG);
  1074. if (ProfilerBuffer == NULL) {
  1075. Status = STATUS_INSUFFICIENT_RESOURCES;
  1076. goto InitializeProfilerEnd;
  1077. }
  1078. RtlZeroMemory(ProfilerBuffer, sizeof(PROFILER_BUFFER));
  1079. StackSamplingArray[Index] = ProfilerBuffer;
  1080. }
  1081. //
  1082. // Start the timer and then mark the profiler as enabled and update the
  1083. // stack sampling globals. This might cause some initial interrupts to skip
  1084. // data collection, but that's OK.
  1085. //
  1086. Status = HlStartProfilerTimer();
  1087. if (!KSUCCESS(Status)) {
  1088. goto InitializeProfilerEnd;
  1089. }
  1090. SpStackSamplingArray = StackSamplingArray;
  1091. SpStackSamplingArraySize = ProcessorCount;
  1092. RtlMemoryBarrier();
  1093. SpEnabledFlags |= PROFILER_TYPE_FLAG_STACK_SAMPLING;
  1094. InitializeProfilerEnd:
  1095. if (!KSUCCESS(Status)) {
  1096. for (Index = 0; Index < ProcessorCount; Index += 1) {
  1097. if (StackSamplingArray[Index] != NULL) {
  1098. MmFreeNonPagedPool(StackSamplingArray[Index]);
  1099. }
  1100. }
  1101. MmFreeNonPagedPool(StackSamplingArray);
  1102. }
  1103. return Status;
  1104. }
VOID
SppDestroyStackSampling (
    ULONG Phase
    )

/*++

Routine Description:

    This routine tears down stack sampling by disabling the profiler timer and
    destroying the stack sampling data structures. Phase 0 stops the stack
    sampling profiler producers and consumers. Phase 1 cleans up resources.
    Between the phases the caller is expected to wait for every processor to
    take at least one more clock interrupt.

Arguments:

    Phase - Supplies the current phase of the destruction process (0 or 1).

Return Value:

    None.

--*/

{

    ULONG Index;

    ASSERT(KeGetRunLevel() == RunLevelLow);
    ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
    ASSERT(SpStackSamplingArray != NULL);
    ASSERT(SpStackSamplingArraySize != 0);

    if (Phase == 0) {

        ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) != 0);

        //
        // Disable stack sampling before disabling the profiler timer to
        // prevent any pending producer interrupts from touching the buffers
        // after they are released.
        //

        SpEnabledFlags &= ~PROFILER_TYPE_FLAG_STACK_SAMPLING;

        //
        // Stop the profiler timer. Since the caller will wait for at least
        // one more clock interrupt, it is safe to proceed even though
        // stopping the timer doesn't guarantee the profiler interrupt will
        // not run again. It could be pending on another processor. The wait
        // for the clock interrupt will guarantee that all high level and IPI
        // interrupts have completed.
        //

        HlStopProfilerTimer();

    } else {

        ASSERT(Phase == 1);
        ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_STACK_SAMPLING) == 0);

        //
        // Destroy the stack sampling array, including each per-processor
        // profiler buffer.
        //

        for (Index = 0; Index < SpStackSamplingArraySize; Index += 1) {
            if (SpStackSamplingArray[Index] != NULL) {
                MmFreeNonPagedPool(SpStackSamplingArray[Index]);
            }
        }

        MmFreeNonPagedPool(SpStackSamplingArray);
        SpStackSamplingArray = NULL;
        SpStackSamplingArraySize = 0;
    }

    return;
}
KSTATUS
SppInitializeMemoryStatistics (
    VOID
    )

/*++

Routine Description:

    This routine initializes the structures and timers necessary for profiling
    system memory statistics. It must be called at low level with the profiler
    queued lock held.

Arguments:

    None.

Return Value:

    Status code.

--*/

{

    ULONGLONG Period;
    KSTATUS Status;

    ASSERT(KeGetRunLevel() == RunLevelLow);
    ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
    ASSERT(SpMemory == NULL);

    //
    // Allocate the memory profiler structure.
    //

    SpMemory = MmAllocateNonPagedPool(sizeof(MEMORY_PROFILER),
                                      SP_ALLOCATION_TAG);

    if (SpMemory == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto InitializeMemoryStatisticsEnd;
    }

    RtlZeroMemory(SpMemory, sizeof(MEMORY_PROFILER));

    ASSERT(SpMemory->ConsumerActive == FALSE);
    ASSERT(SpMemory->ReadyIndex == 0);
    ASSERT(SpMemory->ProducerIndex == 0);

    //
    // Start the consumer on the last buffer so the producer and consumer
    // begin on different buffers.
    //

    SpMemory->ConsumerIndex = MEMORY_BUFFER_COUNT - 1;

    //
    // Create the timer that will periodically trigger memory statistics.
    //

    SpMemory->Timer = KeCreateTimer(SP_ALLOCATION_TAG);
    if (SpMemory->Timer == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto InitializeMemoryStatisticsEnd;
    }

    //
    // Queue the timer periodically.
    //

    Period = KeConvertMicrosecondsToTimeTicks(MEMORY_STATISTICS_TIMER_PERIOD);
    Status = KeQueueTimer(SpMemory->Timer,
                          TimerQueueSoft,
                          0,
                          Period,
                          0,
                          NULL);

    if (!KSUCCESS(Status)) {
        goto InitializeMemoryStatisticsEnd;
    }

    //
    // Create the worker thread, which will wait on the timer. Add an extra
    // reference because the destruction routine waits until this thread
    // exits.
    //

    SpMemory->ThreadAlive = TRUE;
    Status = PsCreateKernelThread(SppMemoryStatisticsThread,
                                  NULL,
                                  "SppMemoryStatisticsThread");

    if (!KSUCCESS(Status)) {
        SpMemory->ThreadAlive = FALSE;
        goto InitializeMemoryStatisticsEnd;
    }

    //
    // Make sure everything above is complete before turning this on.
    //

    RtlMemoryBarrier();
    SpEnabledFlags |= PROFILER_TYPE_FLAG_MEMORY_STATISTICS;

InitializeMemoryStatisticsEnd:
    if (!KSUCCESS(Status)) {
        if (SpMemory != NULL) {
            if (SpMemory->Timer != NULL) {

                //
                // NOTE(review): if thread creation failed, the periodic timer
                // is still queued here; this assumes KeDestroyTimer handles
                // (cancels) a queued timer — confirm against its contract.
                //

                KeDestroyTimer(SpMemory->Timer);
            }

            //
            // Thread creation should be the last point of failure.
            //

            ASSERT(SpMemory->ThreadAlive == FALSE);

            MmFreeNonPagedPool(SpMemory);
            SpMemory = NULL;
        }
    }

    return Status;
}
VOID
SppDestroyMemoryStatistics (
    ULONG Phase
    )

/*++

Routine Description:

    This routine destroys the structures and timers used to profile system
    memory statistics. Phase 0 stops the memory profiler producers and
    consumers. Phase 1 cleans up resources.

Arguments:

    Phase - Supplies the current phase of the destruction process (0 or 1).

Return Value:

    None.

--*/

{

    ULONG Index;
    KSTATUS Status;

    ASSERT(KeGetRunLevel() == RunLevelLow);
    ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
    ASSERT(SpMemory != NULL);
    ASSERT(SpMemory->Timer != NULL);

    if (Phase == 0) {

        ASSERT(SpMemory->ThreadAlive != FALSE);
        ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) != 0);

        //
        // Disable the memory statistics profiler. The worker thread checks
        // this flag after each timer wait and exits when it is clear.
        //

        SpEnabledFlags &= ~PROFILER_TYPE_FLAG_MEMORY_STATISTICS;

        //
        // Cancel the timer. This is a periodic timer, so cancel should always
        // succeed.
        //

        Status = KeCancelTimer(SpMemory->Timer);

        ASSERT(KSUCCESS(Status));

        //
        // Queue the timer one more time in case the worker thread was in the
        // act of waiting when the timer was cancelled or was processing data.
        // This one-shot wake lets the thread observe the cleared enable flag.
        //

        Status = KeQueueTimer(SpMemory->Timer,
                              TimerQueueSoftWake,
                              0,
                              0,
                              0,
                              NULL);

        ASSERT(KSUCCESS(Status));

        //
        // Wait until the thread exits in order to be sure that it has
        // registered that profiling has been cancelled.
        //

        while (SpMemory->ThreadAlive != FALSE) {
            KeYield();
        }

    } else {

        ASSERT(Phase == 1);
        ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) == 0);
        ASSERT(SpMemory->ThreadAlive == FALSE);

        //
        // Destroy the timer.
        //

        KeDestroyTimer(SpMemory->Timer);

        //
        // Release any buffers that are holding pool statistics.
        //

        for (Index = 0; Index < MEMORY_BUFFER_COUNT; Index += 1) {
            if (SpMemory->MemoryBuffers[Index].Buffer != NULL) {
                MmFreeNonPagedPool(SpMemory->MemoryBuffers[Index].Buffer);
            }
        }

        MmFreeNonPagedPool(SpMemory);
        SpMemory = NULL;
    }

    return;
}
VOID
SppMemoryStatisticsThread (
    PVOID Parameter
    )

/*++

Routine Description:

    This routine is the workhorse for gathering memory statistics and writing
    them to a buffer that can then be consumed on the clock interrupt. It
    waits on the memory statistics timer before periodically collecting the
    statistics.

Arguments:

    Parameter - Supplies a pointer supplied by the creator of the thread. This
        pointer is not used.

Return Value:

    None.

--*/

{

    PVOID Buffer;
    ULONG BufferSize;
    ULONG Index;
    PMEMORY_BUFFER MemoryBuffer;
    KSTATUS Status;

    ASSERT(KeGetRunLevel() == RunLevelLow);
    ASSERT(SpMemory->ThreadAlive != FALSE);

    while (TRUE) {

        //
        // Wait for the memory statistics timer to expire.
        //

        ObWaitOnObject(SpMemory->Timer, 0, WAIT_TIME_INDEFINITE);

        //
        // Check to make sure memory statistics profiling is still enabled.
        // The destroy routine clears the flag and queues the timer one extra
        // time specifically to wake this thread up for this check.
        //

        if ((SpEnabledFlags & PROFILER_TYPE_FLAG_MEMORY_STATISTICS) == 0) {
            break;
        }

        //
        // Call the memory manager to get the latest pool statistics. It will
        // pass back an appropriately sized buffer with all the statistics.
        // On failure, skip this period and try again on the next expiration.
        //

        Status = MmGetPoolProfilerStatistics(&Buffer,
                                             &BufferSize,
                                             SP_ALLOCATION_TAG);

        if (!KSUCCESS(Status)) {
            continue;
        }

        //
        // Get the producer's memory buffer.
        //

        ASSERT(SpMemory->ProducerIndex < MEMORY_BUFFER_COUNT);

        MemoryBuffer = &(SpMemory->MemoryBuffers[SpMemory->ProducerIndex]);

        //
        // Destroy what is currently in the memory buffer.
        //

        if (MemoryBuffer->Buffer != NULL) {
            MmFreeNonPagedPool(MemoryBuffer->Buffer);
        }

        //
        // Reinitialize the buffer with the freshly collected statistics.
        //

        MemoryBuffer->Buffer = Buffer;
        MemoryBuffer->BufferSize = BufferSize;
        MemoryBuffer->ConsumerIndex = 0;

        //
        // Now that this is the latest and greatest memory information, point
        // the ready index at it. It doesn't matter that the ready index and
        // the producer index will temporarily be the same. There is a
        // guarantee that the producer will not produce again until it points
        // at a new buffer. This makes it safe for the consumer to just grab
        // the ready index.
        //

        SpMemory->ReadyIndex = SpMemory->ProducerIndex;

        //
        // Now search for the free buffer and make it the producer index.
        // There always has to be one free (neither ready nor being consumed).
        //

        for (Index = 0; Index < MEMORY_BUFFER_COUNT; Index += 1) {
            if ((Index != SpMemory->ReadyIndex) &&
                (Index != SpMemory->ConsumerIndex)) {

                SpMemory->ProducerIndex = Index;
                break;
            }
        }

        ASSERT(SpMemory->ReadyIndex != SpMemory->ProducerIndex);
    }

    //
    // Signal the destroy routine, which spins waiting for this flag to clear.
    //

    SpMemory->ThreadAlive = FALSE;
    return;
}
  1406. KSTATUS
  1407. SppInitializeThreadStatistics (
  1408. VOID
  1409. )
  1410. /*++
  1411. Routine Description:
  1412. This routine initializes the system's thread profiling data structures.
  1413. Arguments:
  1414. None.
  1415. Return Value:
  1416. Status code.
  1417. --*/
  1418. {
  1419. ULONG AllocationSize;
  1420. ULONG Index;
  1421. RUNLEVEL OldRunLevel;
  1422. ULONG ProcessorCount;
  1423. ULONG ProcessorNumber;
  1424. PPROFILER_BUFFER ProfilerBuffer;
  1425. KSTATUS Status;
  1426. SYSTEM_TIME SystemTime;
  1427. PPROFILER_BUFFER *ThreadStatisticsArray;
  1428. PROFILER_THREAD_TIME_COUNTER TimeCounterEvent;
  1429. ASSERT(KeGetRunLevel() == RunLevelLow);
  1430. ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) == 0);
  1431. ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
  1432. ASSERT(SpThreadStatisticsArray == NULL);
  1433. ASSERT(SpThreadStatisticsArraySize == 0);
  1434. ProcessorCount = KeGetActiveProcessorCount();
  1435. AllocationSize = ProcessorCount * sizeof(PPROFILER_BUFFER);
  1436. ThreadStatisticsArray = MmAllocateNonPagedPool(AllocationSize,
  1437. SP_ALLOCATION_TAG);
  1438. if (ThreadStatisticsArray == NULL) {
  1439. Status = STATUS_INSUFFICIENT_RESOURCES;
  1440. goto InitializeProfilerEnd;
  1441. }
  1442. //
  1443. // Now fill in the array with profiler buffers.
  1444. //
  1445. RtlZeroMemory(ThreadStatisticsArray, AllocationSize);
  1446. for (Index = 0; Index < ProcessorCount; Index += 1) {
  1447. ProfilerBuffer = MmAllocateNonPagedPool(sizeof(PROFILER_BUFFER),
  1448. SP_ALLOCATION_TAG);
  1449. if (ProfilerBuffer == NULL) {
  1450. Status = STATUS_INSUFFICIENT_RESOURCES;
  1451. goto InitializeProfilerEnd;
  1452. }
  1453. RtlZeroMemory(ProfilerBuffer, sizeof(PROFILER_BUFFER));
  1454. ThreadStatisticsArray[Index] = ProfilerBuffer;
  1455. }
  1456. SpThreadStatisticsArray = ThreadStatisticsArray;
  1457. SpThreadStatisticsArraySize = ProcessorCount;
  1458. //
  1459. // Enable profiling by filling in the function pointer.
  1460. //
  1461. SpCollectThreadStatisticRoutine = SppCollectThreadStatistic;
  1462. SpProcessNewProcessRoutine = SppProcessNewProcess;
  1463. SpProcessNewThreadRoutine = SppProcessNewThread;
  1464. RtlMemoryBarrier();
  1465. SpEnabledFlags |= PROFILER_TYPE_FLAG_THREAD_STATISTICS;
  1466. //
  1467. // Raise to dispatch (so that no thread events are added on this processor)
  1468. // and add the first event, a time counter synchronization event.
  1469. //
  1470. OldRunLevel = KeRaiseRunLevel(RunLevelDispatch);
  1471. ProcessorNumber = KeGetCurrentProcessorNumber();
  1472. TimeCounterEvent.EventType = ProfilerThreadEventTimeCounter;
  1473. TimeCounterEvent.TimeCounter = HlQueryTimeCounter();
  1474. KeGetSystemTime(&SystemTime);
  1475. TimeCounterEvent.SystemTimeSeconds = SystemTime.Seconds;
  1476. TimeCounterEvent.SystemTimeNanoseconds = SystemTime.Nanoseconds;
  1477. TimeCounterEvent.TimeCounterFrequency = HlQueryTimeCounterFrequency();
  1478. SppWriteProfilerBuffer(SpThreadStatisticsArray[ProcessorNumber],
  1479. (BYTE *)&TimeCounterEvent,
  1480. sizeof(PROFILER_THREAD_TIME_COUNTER));
  1481. KeLowerRunLevel(OldRunLevel);
  1482. SppSendInitialProcesses();
  1483. Status = STATUS_SUCCESS;
  1484. InitializeProfilerEnd:
  1485. if (!KSUCCESS(Status)) {
  1486. for (Index = 0; Index < ProcessorCount; Index += 1) {
  1487. if (ThreadStatisticsArray[Index] != NULL) {
  1488. MmFreeNonPagedPool(ThreadStatisticsArray[Index]);
  1489. }
  1490. }
  1491. MmFreeNonPagedPool(ThreadStatisticsArray);
  1492. }
  1493. return Status;
  1494. }
  1495. VOID
  1496. SppSendInitialProcesses (
  1497. VOID
  1498. )
  1499. /*++
  1500. Routine Description:
  1501. This routine sends the initial set of process and threads active on the
  1502. system. This routine must be called at low level.
  1503. Arguments:
  1504. None.
  1505. Return Value:
  1506. None.
  1507. --*/
  1508. {
  1509. BOOL Added;
  1510. ULONG ConsumedSize;
  1511. PPROFILER_THREAD_NEW_PROCESS Event;
  1512. ULONG MaxNameSize;
  1513. PSTR Name;
  1514. ULONG NameSize;
  1515. RUNLEVEL OldRunLevel;
  1516. PPROCESS_INFORMATION Process;
  1517. PPROCESS_INFORMATION ProcessList;
  1518. UINTN ProcessListSize;
  1519. ULONG ProcessorNumber;
  1520. KSTATUS Status;
  1521. KSTATUS ThreadStatus;
  1522. ASSERT(KeGetRunLevel() == RunLevelLow);
  1523. ProcessList = NULL;
  1524. Status = PsGetAllProcessInformation(SP_ALLOCATION_TAG,
  1525. (PVOID)&ProcessList,
  1526. &ProcessListSize);
  1527. if (!KSUCCESS(Status)) {
  1528. goto SendInitialProcessesEnd;
  1529. }
  1530. ConsumedSize = 0;
  1531. MaxNameSize = SCRATCH_BUFFER_LENGTH -
  1532. FIELD_OFFSET(PROFILER_THREAD_NEW_PROCESS, Name);
  1533. Process = ProcessList;
  1534. while (ConsumedSize < ProcessListSize) {
  1535. OldRunLevel = KeRaiseRunLevel(RunLevelDispatch);
  1536. ProcessorNumber = KeGetCurrentProcessorNumber();
  1537. Event = (PVOID)(SpThreadStatisticsArray[ProcessorNumber]->Scratch);
  1538. Event->EventType = ProfilerThreadEventNewProcess;
  1539. NameSize = Process->NameLength * sizeof(CHAR);
  1540. if (NameSize > MaxNameSize) {
  1541. NameSize = MaxNameSize;
  1542. }
  1543. Event->StructureSize = sizeof(PROFILER_THREAD_NEW_PROCESS);
  1544. if (NameSize != 0) {
  1545. Event->StructureSize -= ANYSIZE_ARRAY * sizeof(CHAR);
  1546. Event->StructureSize += NameSize;
  1547. Name = (PSTR)((PVOID)Process + Process->NameOffset);
  1548. RtlStringCopy(Event->Name, Name, NameSize);
  1549. } else {
  1550. Event->Name[0] = STRING_TERMINATOR;
  1551. }
  1552. Event->ProcessId = Process->ProcessId;
  1553. Event->TimeCounter = 0;
  1554. Added = SppWriteProfilerBuffer(SpThreadStatisticsArray[ProcessorNumber],
  1555. (BYTE *)Event,
  1556. Event->StructureSize);
  1557. if (Added == FALSE) {
  1558. Status = STATUS_BUFFER_TOO_SMALL;
  1559. }
  1560. KeLowerRunLevel(OldRunLevel);
  1561. ThreadStatus = SppSendInitialThreads(Process->ProcessId);
  1562. if (!KSUCCESS(ThreadStatus)) {
  1563. Status = ThreadStatus;
  1564. }
  1565. ConsumedSize += Process->StructureSize;
  1566. ASSERT(ConsumedSize <= ProcessListSize);
  1567. Process = (PPROCESS_INFORMATION)((PUCHAR)Process +
  1568. Process->StructureSize);
  1569. }
  1570. Status = STATUS_SUCCESS;
  1571. SendInitialProcessesEnd:
  1572. if (ProcessList != NULL) {
  1573. MmFreeNonPagedPool(ProcessList);
  1574. }
  1575. if (!KSUCCESS(Status)) {
  1576. RtlDebugPrint("Profiler: Failed to send initial processes: %d.\n",
  1577. Status);
  1578. }
  1579. return;
  1580. }
  1581. KSTATUS
  1582. SppSendInitialThreads (
  1583. PROCESS_ID ProcessId
  1584. )
  1585. /*++
  1586. Routine Description:
  1587. This routine sends the initial set of threads for the given process.
  1588. This routine must be called at dispatch level.
  1589. Arguments:
  1590. ProcessId - Supplies the process ID of the threads to send.
  1591. Return Value:
  1592. None.
  1593. --*/
  1594. {
  1595. BOOL Added;
  1596. ULONG ConsumedSize;
  1597. PPROFILER_THREAD_NEW_THREAD Event;
  1598. ULONG MaxNameSize;
  1599. ULONG NameSize;
  1600. RUNLEVEL OldRunLevel;
  1601. ULONG ProcessorNumber;
  1602. KSTATUS Status;
  1603. PTHREAD_INFORMATION Thread;
  1604. PTHREAD_INFORMATION ThreadList;
  1605. ULONG ThreadListSize;
  1606. ASSERT(KeGetRunLevel() == RunLevelLow);
  1607. if ((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) == 0) {
  1608. return STATUS_SUCCESS;
  1609. }
  1610. ThreadList = NULL;
  1611. Status = PsGetThreadList(ProcessId,
  1612. SP_ALLOCATION_TAG,
  1613. (PVOID)&ThreadList,
  1614. &ThreadListSize);
  1615. if (!KSUCCESS(Status)) {
  1616. goto SendInitialThreadsEnd;
  1617. }
  1618. OldRunLevel = KeRaiseRunLevel(RunLevelDispatch);
  1619. ProcessorNumber = KeGetCurrentProcessorNumber();
  1620. Event = (PVOID)(SpThreadStatisticsArray[ProcessorNumber]->Scratch);
  1621. ConsumedSize = 0;
  1622. MaxNameSize = SCRATCH_BUFFER_LENGTH -
  1623. FIELD_OFFSET(PROFILER_THREAD_NEW_THREAD, Name);
  1624. Event->EventType = ProfilerThreadEventNewThread;
  1625. Thread = ThreadList;
  1626. while (ConsumedSize < ThreadListSize) {
  1627. ASSERT(Thread->StructureSize >= sizeof(THREAD_INFORMATION));
  1628. NameSize = Thread->StructureSize -
  1629. FIELD_OFFSET(THREAD_INFORMATION, Name);
  1630. if (NameSize > MaxNameSize) {
  1631. NameSize = MaxNameSize;
  1632. }
  1633. Event->StructureSize = sizeof(PROFILER_THREAD_NEW_THREAD) -
  1634. (ANYSIZE_ARRAY * sizeof(CHAR)) + NameSize;
  1635. Event->ProcessId = ProcessId;
  1636. Event->ThreadId = Thread->ThreadId;
  1637. Event->TimeCounter = 0;
  1638. RtlStringCopy(Event->Name, Thread->Name, NameSize);
  1639. Added = SppWriteProfilerBuffer(SpThreadStatisticsArray[ProcessorNumber],
  1640. (BYTE *)Event,
  1641. Event->StructureSize);
  1642. if (Added == FALSE) {
  1643. Status = STATUS_BUFFER_TOO_SMALL;
  1644. }
  1645. ConsumedSize += Thread->StructureSize;
  1646. ASSERT(ConsumedSize <= ThreadListSize);
  1647. Thread = (PTHREAD_INFORMATION)((PUCHAR)Thread + Thread->StructureSize);
  1648. }
  1649. KeLowerRunLevel(OldRunLevel);
  1650. Status = STATUS_SUCCESS;
  1651. SendInitialThreadsEnd:
  1652. if (ThreadList != NULL) {
  1653. MmFreeNonPagedPool(ThreadList);
  1654. }
  1655. return Status;
  1656. }
VOID
SppProcessNewProcess (
    PROCESS_ID ProcessId
    )

/*++

Routine Description:

    This routine collects statistics on a created process. It queries the
    process's information, formats a new-process event into the current
    processor's scratch buffer, and commits it to that processor's profiler
    buffer.

Arguments:

    ProcessId - Supplies the ID of the process being created.

Return Value:

    None.

--*/

{

    BOOL Added;
    PPROFILER_THREAD_NEW_PROCESS Event;
    ULONG MaxNameSize;
    PSTR Name;
    ULONG NameSize;
    RUNLEVEL OldRunLevel;
    PPROCESS_INFORMATION Process;
    ULONG ProcessorNumber;
    UINTN ProcessSize;
    KSTATUS Status;

    //
    // Do nothing if thread statistics profiling is not currently enabled.
    //

    if ((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) == 0) {
        return;
    }

    //
    // Allocate a fixed-size buffer to receive the process information.
    // NOTE(review): this assumes PROFILER_PROCESS_INFORMATION_SIZE is large
    // enough for any process record; PsGetProcessInformation presumably
    // fails otherwise — confirm against its contract.
    //

    ProcessSize = PROFILER_PROCESS_INFORMATION_SIZE;
    Process = MmAllocateNonPagedPool(ProcessSize, SP_ALLOCATION_TAG);
    if (Process == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto ProcessNewProcessEnd;
    }

    Process->Version = PROCESS_INFORMATION_VERSION;
    Status = PsGetProcessInformation(ProcessId, Process, &ProcessSize);
    if (!KSUCCESS(Status)) {
        goto ProcessNewProcessEnd;
    }

    //
    // Clamp the name to the space remaining in the scratch buffer after the
    // fixed event header.
    //

    MaxNameSize = SCRATCH_BUFFER_LENGTH -
                  FIELD_OFFSET(PROFILER_THREAD_NEW_PROCESS, Name);

    NameSize = Process->NameLength * sizeof(CHAR);
    if (NameSize > MaxNameSize) {
        NameSize = MaxNameSize;
    }

    //
    // Raise to dispatch level before reading the processor number so that
    // the thread keeps using the same processor's scratch buffer until the
    // event is committed.
    //

    OldRunLevel = KeRaiseRunLevel(RunLevelDispatch);
    ProcessorNumber = KeGetCurrentProcessorNumber();
    Event = (PVOID)(SpThreadStatisticsArray[ProcessorNumber]->Scratch);
    Event->EventType = ProfilerThreadEventNewProcess;

    //
    // The structure embeds ANYSIZE_ARRAY characters for the name; replace
    // that placeholder with the actual name size when a name is present.
    //

    Event->StructureSize = sizeof(PROFILER_THREAD_NEW_PROCESS);
    if (NameSize != 0) {
        Event->StructureSize -= ANYSIZE_ARRAY * sizeof(CHAR);
        Event->StructureSize += NameSize;
        Name = (PSTR)((PVOID)Process + Process->NameOffset);
        RtlStringCopy(Event->Name, Name, NameSize);

    } else {
        Event->Name[0] = STRING_TERMINATOR;
    }

    Event->ProcessId = Process->ProcessId;

    //
    // Unlike the initial snapshot events (which use zero), stamp live
    // creation events with the current time counter.
    //

    Event->TimeCounter = HlQueryTimeCounter();
    Added = SppWriteProfilerBuffer(SpThreadStatisticsArray[ProcessorNumber],
                                   (BYTE *)Event,
                                   Event->StructureSize);

    Status = STATUS_SUCCESS;
    if (Added == FALSE) {
        Status = STATUS_BUFFER_TOO_SMALL;
    }

    KeLowerRunLevel(OldRunLevel);

ProcessNewProcessEnd:

    //
    // Failures are logged but otherwise ignored: profiling is best effort
    // and must not disturb process creation.
    //

    if (!KSUCCESS(Status)) {
        RtlDebugPrint("Warning: Unable to add profiling event for new "
                      "process %d.\n",
                      ProcessId);
    }

    if (Process != NULL) {
        MmFreeNonPagedPool(Process);
    }

    return;
}
  1734. VOID
  1735. SppProcessNewThread (
  1736. PROCESS_ID ProcessId,
  1737. THREAD_ID ThreadId
  1738. )
  1739. /*++
  1740. Routine Description:
  1741. This routine collects statistics on a created thread.
  1742. Arguments:
  1743. ProcessId - Supplies the ID of the process creating the new thread.
  1744. ThreadId - Supplies the ID of the new thread being created.
  1745. Return Value:
  1746. None.
  1747. --*/
  1748. {
  1749. BOOL Added;
  1750. PPROFILER_THREAD_NEW_THREAD Event;
  1751. ULONG NameSize;
  1752. RUNLEVEL OldRunLevel;
  1753. ULONG ProcessorNumber;
  1754. KSTATUS Status;
  1755. PTHREAD_INFORMATION Thread;
  1756. ULONG ThreadSize;
  1757. if ((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) == 0) {
  1758. return;
  1759. }
  1760. ThreadSize = PROFILER_THREAD_INFORMATION_SIZE;
  1761. Thread = MmAllocateNonPagedPool(ThreadSize, SP_ALLOCATION_TAG);
  1762. if (Thread == NULL) {
  1763. Status = STATUS_INSUFFICIENT_RESOURCES;
  1764. goto ProcessNewThreadEnd;
  1765. }
  1766. Status = PsGetThreadInformation(ProcessId, ThreadId, Thread, &ThreadSize);
  1767. if (!KSUCCESS(Status)) {
  1768. goto ProcessNewThreadEnd;
  1769. }
  1770. OldRunLevel = KeRaiseRunLevel(RunLevelDispatch);
  1771. ProcessorNumber = KeGetCurrentProcessorNumber();
  1772. Event = (PVOID)(SpThreadStatisticsArray[ProcessorNumber]->Scratch);
  1773. Event->EventType = ProfilerThreadEventNewThread;
  1774. ASSERT(Thread->StructureSize >= sizeof(THREAD_INFORMATION));
  1775. NameSize = Thread->StructureSize -
  1776. FIELD_OFFSET(THREAD_INFORMATION, Name);
  1777. ASSERT(NameSize < ThreadSize);
  1778. Event->StructureSize = sizeof(PROFILER_THREAD_NEW_THREAD) -
  1779. (ANYSIZE_ARRAY * sizeof(CHAR)) + NameSize;
  1780. Event->ProcessId = ProcessId;
  1781. Event->ThreadId = Thread->ThreadId;
  1782. Event->TimeCounter = HlQueryTimeCounter();
  1783. RtlStringCopy(Event->Name, Thread->Name, NameSize);
  1784. Added = SppWriteProfilerBuffer(SpThreadStatisticsArray[ProcessorNumber],
  1785. (BYTE *)Event,
  1786. Event->StructureSize);
  1787. Status = STATUS_SUCCESS;
  1788. if (Added == FALSE) {
  1789. Status = STATUS_BUFFER_TOO_SMALL;
  1790. }
  1791. KeLowerRunLevel(OldRunLevel);
  1792. ProcessNewThreadEnd:
  1793. if (!KSUCCESS(Status)) {
  1794. RtlDebugPrint("Warning: Unable to add profiling event for new "
  1795. "thread %d (Process %d).\n",
  1796. ThreadId,
  1797. ProcessId);
  1798. }
  1799. if (Thread != NULL) {
  1800. MmFreeNonPagedPool(Thread);
  1801. }
  1802. return;
  1803. }
  1804. VOID
  1805. SppDestroyThreadStatistics (
  1806. ULONG Phase
  1807. )
  1808. /*++
  1809. Routine Description:
  1810. This routine tears down thread profiling. Phase 0 stops the thread
  1811. statistics producers and consumers. Phase 1 cleans up resources.
  1812. Arguments:
  1813. Phase - Supplies the current phase of the destruction process.
  1814. Return Value:
  1815. None.
  1816. --*/
  1817. {
  1818. ULONG Index;
  1819. ASSERT(KeGetRunLevel() == RunLevelLow);
  1820. ASSERT(KeIsQueuedLockHeld(SpProfilingQueuedLock) != FALSE);
  1821. ASSERT(SpThreadStatisticsArray != NULL);
  1822. ASSERT(SpThreadStatisticsArraySize != 0);
  1823. if (Phase == 0) {
  1824. ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) != 0);
  1825. //
  1826. // Disable thread statistics before disabling the profiler function to
  1827. // prevent any pending producers from touching the buffers after they
  1828. // are released
  1829. //
  1830. SpEnabledFlags &= ~PROFILER_TYPE_FLAG_THREAD_STATISTICS;
  1831. //
  1832. // Clear the function pointer to officially take the profiling down.
  1833. //
  1834. SpCollectThreadStatisticRoutine = NULL;
  1835. RtlMemoryBarrier();
  1836. } else {
  1837. ASSERT(Phase == 1);
  1838. ASSERT((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) == 0);
  1839. //
  1840. // Destroy the stack sampling array.
  1841. //
  1842. for (Index = 0; Index < SpThreadStatisticsArraySize; Index += 1) {
  1843. if (SpThreadStatisticsArray[Index] != NULL) {
  1844. MmFreeNonPagedPool(SpThreadStatisticsArray[Index]);
  1845. }
  1846. }
  1847. MmFreeNonPagedPool(SpThreadStatisticsArray);
  1848. SpThreadStatisticsArray = NULL;
  1849. SpThreadStatisticsArraySize = 0;
  1850. }
  1851. return;
  1852. }
  1853. VOID
  1854. SppCollectThreadStatistic (
  1855. PKTHREAD Thread,
  1856. PPROCESSOR_BLOCK Processor,
  1857. SCHEDULER_REASON ScheduleOutReason
  1858. )
  1859. /*++
  1860. Routine Description:
  1861. This routine collects statistics on a thread that is being scheduled out.
  1862. This routine must be called at dispatch level inside the scheduler.
  1863. Arguments:
  1864. Thread - Supplies a pointer to the thread being scheduled out.
  1865. Processor - Supplies a pointer to the executing processor block.
  1866. ScheduleOutReason - Supplies the reason the thread is being scheduled out.
  1867. Return Value:
  1868. None.
  1869. --*/
  1870. {
  1871. PPROFILER_CONTEXT_SWAP ContextSwap;
  1872. ULONG ProcessorNumber;
  1873. if ((SpEnabledFlags & PROFILER_TYPE_FLAG_THREAD_STATISTICS) == 0) {
  1874. return;
  1875. }
  1876. //
  1877. // Do not collect data on processors that have not been initialized for
  1878. // profiling.
  1879. //
  1880. if (Processor->ProcessorNumber >= SpThreadStatisticsArraySize) {
  1881. return;
  1882. }
  1883. ProcessorNumber = Processor->ProcessorNumber;
  1884. ASSERT(sizeof(PROFILER_CONTEXT_SWAP) < SCRATCH_BUFFER_LENGTH);
  1885. ContextSwap = (PVOID)(SpThreadStatisticsArray[ProcessorNumber]->Scratch);
  1886. ContextSwap->EventType = ScheduleOutReason;
  1887. ContextSwap->TimeCount = HlQueryTimeCounter();
  1888. ContextSwap->BlockingQueue = (UINTN)NULL;
  1889. if (ScheduleOutReason == SchedulerReasonThreadBlocking) {
  1890. ContextSwap->BlockingQueue = (UINTN)(ObGetBlockingQueue(Thread));
  1891. }
  1892. ContextSwap->ThreadId = Thread->ThreadId;
  1893. ContextSwap->ProcessId = Thread->OwningProcess->Identifiers.ProcessId;
  1894. //
  1895. // Write the data to the sampling buffer.
  1896. //
  1897. SppWriteProfilerBuffer(SpThreadStatisticsArray[ProcessorNumber],
  1898. (BYTE *)ContextSwap,
  1899. sizeof(PROFILER_CONTEXT_SWAP));
  1900. return;
  1901. }