/*++

Copyright (c) 2013 Minoca Corp. All Rights Reserved

Module Name:

    pagecach.c

Abstract:

    This module implements support for the I/O page cache.

Author:

    Chris Stevens 11-Sep-2013

Environment:

    Kernel

--*/

//
// ------------------------------------------------------------------- Includes
//

#include <minoca/kernel/kernel.h>
#include "iop.h"
#include "pagecach.h"

//
// ---------------------------------------------------------------- Definitions
//

#define PAGE_CACHE_ALLOCATION_TAG 0x68436750 // 'hCgP'

//
// Define the percentage of the total system physical pages the page cache
// tries to keep free.
//

#define PAGE_CACHE_MEMORY_HEADROOM_PERCENT_TRIGGER 10
#define PAGE_CACHE_MEMORY_HEADROOM_PERCENT_RETREAT 15
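
//
// As a worked example (assuming 4KB pages), a system with 4GB of RAM has
// roughly 1,048,576 physical pages, so eviction kicks in when fewer than
// about 104,857 pages (10%) are free and backs off once about 157,286 pages
// (15%) are free again.
//
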
//
// Define the target size in percent of total system physical memory that the
// page cache aims for. Below this size, paging out begins in addition to
// shrinking the page cache.
//

#define PAGE_CACHE_MINIMUM_MEMORY_TARGET_PERCENT 33

//
// Define the size in percent of total system physical memory that the page
// cache feels it's entitled to even when memory is tight. Performance simply
// suffers too much if the page cache shrinks to nothing.
//

#define PAGE_CACHE_MINIMUM_MEMORY_PERCENT 7

//
// Define the amount of system virtual memory the page cache aims to keep free
// by unmapping page cache entries. This is stored in bytes. There are
// different values for systems with small (<4GB) and large (64-bit) system
// virtual memory resources.
//

#define PAGE_CACHE_SMALL_VIRTUAL_HEADROOM_TRIGGER_BYTES (512 * _1MB)
#define PAGE_CACHE_SMALL_VIRTUAL_HEADROOM_RETREAT_BYTES (896 * _1MB)
#define PAGE_CACHE_LARGE_VIRTUAL_HEADROOM_TRIGGER_BYTES (1 * (UINTN)_1GB)
#define PAGE_CACHE_LARGE_VIRTUAL_HEADROOM_RETREAT_BYTES (3 * (UINTN)_1GB)
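
//
// With 4KB pages, for instance, the small-system values translate to an
// unmap trigger of 131,072 free virtual pages (512MB) and a retreat target
// of 229,376 pages (896MB); see IopInitializePageCache below for the
// conversion.
//
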
//
// Set this flag if the page cache entry contains dirty data.
//

#define PAGE_CACHE_ENTRY_FLAG_DIRTY 0x00000001

//
// Set this flag if the page cache entry owns the physical page it uses.
//

#define PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER 0x00000002

//
// Set this flag if the page cache entry is mapped. This needs to be a flag as
// opposed to just a check of the VA so that it can be managed atomically with
// the dirty flag, keeping the "mapped dirty page" count correct. This flag
// is meant to track whether or not a page is counted in the "mapped page
// count", and so it is not set on non page owners.
//

#define PAGE_CACHE_ENTRY_FLAG_MAPPED 0x00000004
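
//
// Note that the flags word is updated with atomic operations (for example
// RtlAtomicOr32 in IoSetPageCacheEntryVirtualAddress below) so that the
// mapped and dirty bits stay consistent with the mapped and dirty page
// counters.
//
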
//
// Define page cache debug flags.
//

#define PAGE_CACHE_DEBUG_INSERTION 0x00000001
#define PAGE_CACHE_DEBUG_LOOKUP 0x00000002
#define PAGE_CACHE_DEBUG_EVICTION 0x00000004
#define PAGE_CACHE_DEBUG_FLUSH 0x00000008
#define PAGE_CACHE_DEBUG_SIZE_MANAGEMENT 0x00000010
#define PAGE_CACHE_DEBUG_MAPPED_MANAGEMENT 0x00000020
#define PAGE_CACHE_DEBUG_DIRTY_LISTS 0x00000040

//
// Define parameters to help coalesce flushes.
//

#define PAGE_CACHE_FLUSH_MAX _128KB

//
// Define the maximum streak of clean pages the page cache encounters while
// flushing before breaking up a write.
//

#define PAGE_CACHE_FLUSH_MAX_CLEAN_STREAK 4

//
// Define the block expansion count for the page cache entry block allocator.
//

#define PAGE_CACHE_BLOCK_ALLOCATOR_EXPANSION_COUNT 0x40

//
// Define the maximum number of pages that can be used as the low-memory
// clean page minimum: the number of free pages required before page cache
// flushes give up in favor of removing entries in a low memory situation.
//

#define PAGE_CACHE_LOW_MEMORY_CLEAN_PAGE_MAXIMUM 256

//
// Define the percentage of total physical pages that need to be free before
// the page cache stops cleaning entries in order to evict entries.
//

#define PAGE_CACHE_LOW_MEMORY_CLEAN_PAGE_MINIMUM_PERCENTAGE 10

//
// Define the portion of the page cache that should be dirty (maximum) as a
// shift.
//

#define PAGE_CACHE_MAX_DIRTY_SHIFT 1
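
//
// A shift of 1 means at most half of the page cache may be dirty; a shift of
// 2 would permit at most a quarter, and so on.
//
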
//
// This defines the amount of time the page cache worker will delay before
// executing another cleaning pass. This allows writes to pool.
//

#define PAGE_CACHE_CLEAN_DELAY_MIN (5000 * MICROSECONDS_PER_MILLISECOND)

//
// --------------------------------------------------------------------- Macros
//

#define IS_IO_OBJECT_TYPE_LINKABLE(_IoObjectType)     \
    ((_IoObjectType == IoObjectRegularFile) ||        \
     (_IoObjectType == IoObjectSymbolicLink) ||       \
     (_IoObjectType == IoObjectSharedMemoryObject) || \
     (_IoObjectType == IoObjectBlockDevice))

//
// ------------------------------------------------------ Data Type Definitions
//

typedef enum _PAGE_CACHE_STATE {
    PageCacheStateInvalid,
    PageCacheStateClean,
    PageCacheStateDirty,
} PAGE_CACHE_STATE, *PPAGE_CACHE_STATE;

/*++

Structure Description:

    This structure defines a page cache entry.

Members:

    Node - Stores the Red-Black tree node information for this page cache
        entry.

    ListEntry - Stores this page cache entry's list entry in an LRU list, local
        list, or dirty list. This list entry is protected by the global page
        cache list lock.

    FileObject - Stores a pointer to the file object for the device or file to
        which the page cache entry belongs.

    Offset - Stores the offset into the file or device of the cached page.

    PhysicalAddress - Stores the physical address of the page containing the
        cached data.

    VirtualAddress - Stores the virtual address of the page containing the
        cached data.

    BackingEntry - Stores a pointer to a page cache entry that owns the
        physical page used by this page cache entry.

    ReferenceCount - Stores the number of references on this page cache entry.

    Flags - Stores a bitmask of page cache entry flags. See
        PAGE_CACHE_ENTRY_FLAG_* for definitions.

--*/

struct _PAGE_CACHE_ENTRY {
    RED_BLACK_TREE_NODE Node;
    LIST_ENTRY ListEntry;
    PFILE_OBJECT FileObject;
    IO_OFFSET Offset;
    PHYSICAL_ADDRESS PhysicalAddress;
    PVOID VirtualAddress;
    PPAGE_CACHE_ENTRY BackingEntry;
    volatile ULONG ReferenceCount;
    volatile ULONG Flags;
};

//
// ----------------------------------------------- Internal Function Prototypes
//

PPAGE_CACHE_ENTRY
IopCreatePageCacheEntry (
    PFILE_OBJECT FileObject,
    PVOID VirtualAddress,
    PHYSICAL_ADDRESS PhysicalAddress,
    IO_OFFSET Offset
    );

VOID
IopDestroyPageCacheEntries (
    PLIST_ENTRY ListHead
    );

VOID
IopDestroyPageCacheEntry (
    PPAGE_CACHE_ENTRY PageCacheEntry
    );

VOID
IopInsertPageCacheEntry (
    PPAGE_CACHE_ENTRY NewEntry,
    PPAGE_CACHE_ENTRY LinkEntry
    );

PPAGE_CACHE_ENTRY
IopLookupPageCacheEntryHelper (
    PFILE_OBJECT FileObject,
    IO_OFFSET Offset
    );

VOID
IopPageCacheThread (
    PVOID Parameter
    );

KSTATUS
IopFlushPageCacheBuffer (
    PIO_BUFFER FlushBuffer,
    UINTN FlushSize,
    ULONG Flags
    );

VOID
IopTrimRemovalPageCacheList (
    VOID
    );

VOID
IopRemovePageCacheEntriesFromList (
    PLIST_ENTRY PageCacheListHead,
    PLIST_ENTRY DestroyListHead,
    BOOL TimidEffort,
    PUINTN TargetRemoveCount
    );

VOID
IopTrimPageCacheVirtual (
    BOOL TimidEffort
    );

BOOL
IopIsIoBufferPageCacheBackedHelper (
    PFILE_OBJECT FileObject,
    PIO_BUFFER IoBuffer,
    IO_OFFSET Offset,
    UINTN SizeInBytes
    );

KSTATUS
IopUnmapPageCacheEntrySections (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    PBOOL PageWasDirty
    );

KSTATUS
IopRemovePageCacheEntryVirtualAddress (
    PPAGE_CACHE_ENTRY Entry,
    PVOID *VirtualAddress
    );

VOID
IopRemovePageCacheEntryFromTree (
    PPAGE_CACHE_ENTRY PageCacheEntry
    );

VOID
IopUpdatePageCacheEntryList (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    BOOL Created
    );

BOOL
IopIsPageCacheTooBig (
    PUINTN FreePhysicalPages
    );

BOOL
IopIsPageCacheTooMapped (
    PUINTN FreeVirtualPages
    );

VOID
IopCheckFileObjectPageCache (
    PFILE_OBJECT FileObject
    );

//
// -------------------------------------------------------------------- Globals
//

//
// Stores the list head for the page cache entries that are ordered from least
// to most recently used. This will mostly contain clean entries, but could
// have a few dirty entries on it.
//

LIST_ENTRY IoPageCacheCleanList;

//
// Stores the list head for page cache entries that are clean but not mapped.
// The unmap loop moves entries from the clean list to here to avoid iterating
// over them too many times. These entries are considered even less used than
// the clean list.
//

LIST_ENTRY IoPageCacheCleanUnmappedList;

//
// Stores the list head for the list of page cache entries that are ready to be
// removed from the cache. These could be evicted entries or entries that
// belong to a deleted file. A deleted file's entries are not necessarily
// marked evicted, thus the evicted count is not always equal to the size of
// this list.
//

LIST_ENTRY IoPageCacheRemovalList;

//
// Stores a lock to protect access to the lists of page cache entries.
//

PQUEUED_LOCK IoPageCacheListLock;

//
// The count tracks the current number of entries in the cache's tree. This is
// protected by the tree lock.
//

UINTN IoPageCacheEntryCount = 0;

//
// Store the target number of free pages in the system the page cache shoots
// for once low-memory eviction of page cache entries kicks in.
//

UINTN IoPageCacheHeadroomPagesRetreat = 0;

//
// Store the number of free physical pages in the system at (or below) which
// the page cache will start evicting entries.
//

UINTN IoPageCacheHeadroomPagesTrigger = 0;

//
// Store the size of the page cache (in pages) below which the page cache will
// ask for pages to be paged out in an effort to keep the working set in
// memory.
//

UINTN IoPageCacheMinimumPagesTarget = 0;

//
// Store the minimum size (in pages) below which the page cache will not
// attempt to shrink.
//

UINTN IoPageCacheMinimumPages = 0;

//
// Store the minimum number of pages that must be clean in a low memory
// scenario before the page cache worker stops flushing entries in favor of
// removing clean entries.
//

UINTN IoPageCacheLowMemoryCleanPageMinimum = 0;

//
// The physical page count tracks the current number of physical pages in use
// by the cache. This includes pages that are active in the tree and pages that
// are not in the tree, awaiting destruction.
//

volatile UINTN IoPageCachePhysicalPageCount = 0;

//
// Stores the number of pages in the cache that are dirty.
//

volatile UINTN IoPageCacheDirtyPageCount = 0;

//
// Stores the number of page cache pages that are currently mapped.
//

volatile UINTN IoPageCacheMappedPageCount = 0;

//
// Stores the number of dirty page cache entries that are currently mapped.
//

volatile UINTN IoPageCacheMappedDirtyPageCount = 0;

//
// Store the target number of free virtual pages in the system the page cache
// shoots for once low-memory unmapping of page cache entries kicks in.
//

UINTN IoPageCacheHeadroomVirtualPagesRetreat = 0;

//
// Store the number of free virtual pages in the system at (or below) which
// the page cache will start unmapping entries.
//

UINTN IoPageCacheHeadroomVirtualPagesTrigger = 0;

//
// Store the maximum number of dirty pages permitted as an absolute page count.
// This is used to avoid creating too much virtual pressure on 32-bit systems.
//

UINTN IoPageCacheMaxDirtyPages = -1;

//
// Store the page cache timer interval.
//

ULONGLONG IoPageCacheCleanInterval;

//
// Store the timer used to trigger the page cache worker.
//

PKTIMER IoPageCacheWorkTimer;

//
// The page cache state records the current state of the cleaning process.
// This is of type PAGE_CACHE_STATE.
//

volatile ULONG IoPageCacheState = PageCacheStateClean;

//
// This stores the last time the page cache was cleaned.
//

INT64_SYNC IoPageCacheLastCleanTime;

//
// Store a bitfield of enabled page cache debug flags. See PAGE_CACHE_DEBUG_*
// for definitions.
//

ULONG IoPageCacheDebugFlags = 0x0;

//
// Store the global page cache entry block allocator.
//

PBLOCK_ALLOCATOR IoPageCacheBlockAllocator;

//
// Store a pointer to the page cache thread itself.
//

PKTHREAD IoPageCacheThread;

//
// Stores a boolean that can be used to disable page cache entries from storing
// virtual addresses.
//

BOOL IoPageCacheDisableVirtualAddresses;

//
// ------------------------------------------------------------------ Functions
//

KERNEL_API
ULONG
IoGetCacheEntryDataSize (
    VOID
    )

/*++

Routine Description:

    This routine returns the size of data stored in each cache entry.

Arguments:

    None.

Return Value:

    Returns the size of the data stored in each cache entry.

--*/

{

    return MmPageSize();
}

KSTATUS
IoGetCacheStatistics (
    PIO_CACHE_STATISTICS Statistics
    )

/*++

Routine Description:

    This routine collects the cache statistics and returns them to the caller.

Arguments:

    Statistics - Supplies a pointer that receives the cache statistics. The
        caller should zero this buffer beforehand and set the version member to
        IO_CACHE_STATISTICS_VERSION. Failure to zero the structure beforehand
        may result in uninitialized data when a driver built for a newer OS is
        run on an older OS.

Return Value:

    Status code.

--*/

{

    ULONGLONG LastCleanTime;

    if (Statistics->Version < IO_CACHE_STATISTICS_VERSION) {
        return STATUS_INVALID_PARAMETER;
    }

    READ_INT64_SYNC(&IoPageCacheLastCleanTime, &LastCleanTime);
    Statistics->EntryCount = IoPageCacheEntryCount;
    Statistics->HeadroomPagesTrigger = IoPageCacheHeadroomPagesTrigger;
    Statistics->HeadroomPagesRetreat = IoPageCacheHeadroomPagesRetreat;
    Statistics->MinimumPagesTarget = IoPageCacheMinimumPagesTarget;
    Statistics->PhysicalPageCount = IoPageCachePhysicalPageCount;
    Statistics->DirtyPageCount = IoPageCacheDirtyPageCount;
    Statistics->LastCleanTime = LastCleanTime;
    return STATUS_SUCCESS;
}
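
//
// A minimal usage sketch for the routine above: the caller zeroes the
// structure and sets the version member before querying (RtlZeroMemory is
// assumed to be available to the caller).
//
//     IO_CACHE_STATISTICS Statistics;
//     KSTATUS Status;
//
//     RtlZeroMemory(&Statistics, sizeof(IO_CACHE_STATISTICS));
//     Statistics.Version = IO_CACHE_STATISTICS_VERSION;
//     Status = IoGetCacheStatistics(&Statistics);
//     if (KSUCCESS(Status)) {
//
//         //
//         // Statistics.EntryCount and friends are now valid.
//         //
//
//     }
//
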
VOID
IoPageCacheEntryAddReference (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine increments the reference count on the given page cache entry.
    It is assumed that callers of this routine either hold the page cache lock
    or already hold a reference on the given page cache entry.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry whose reference
        count should be incremented.

Return Value:

    None.

--*/

{

    ULONG OldReferenceCount;

    OldReferenceCount = RtlAtomicAdd32(&(PageCacheEntry->ReferenceCount), 1);

    ASSERT(OldReferenceCount < 0x1000);

    return;
}

VOID
IoPageCacheEntryReleaseReference (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine decrements the reference count on the given page cache entry.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry whose reference
        count should be decremented.

Return Value:

    None.

--*/

{

    ULONG OldReferenceCount;

    OldReferenceCount = RtlAtomicAdd32(&(PageCacheEntry->ReferenceCount), -1);

    ASSERT((OldReferenceCount != 0) && (OldReferenceCount < 0x1000));

    //
    // Potentially insert the page cache entry on the LRU list if the reference
    // count just dropped to zero.
    //

    if ((OldReferenceCount == 1) &&
        (PageCacheEntry->ListEntry.Next == NULL) &&
        ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0)) {

        KeAcquireQueuedLock(IoPageCacheListLock);

        //
        // Double check to make sure it's not on a list or dirty now.
        //

        if ((PageCacheEntry->ListEntry.Next == NULL) &&
            ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0)) {

            INSERT_BEFORE(&(PageCacheEntry->ListEntry), &IoPageCacheCleanList);
        }

        KeReleaseQueuedLock(IoPageCacheListLock);
    }

    return;
}
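
//
// A sketch of the expected reference discipline: a caller that looks up an
// entry (which takes a reference) drops that reference when done, at which
// point the entry becomes eligible for the LRU list above. The lookup must
// be performed with the file object's lock held.
//
//     PageCacheEntry = IopLookupPageCacheEntry(FileObject, Offset);
//     if (PageCacheEntry != NULL) {
//
//         //
//         // ... use the entry ...
//         //
//
//         IoPageCacheEntryReleaseReference(PageCacheEntry);
//     }
//
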
PHYSICAL_ADDRESS
IoGetPageCacheEntryPhysicalAddress (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine returns the physical address of the page cache entry.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

Return Value:

    Returns the physical address of the given page cache entry.

--*/

{

    return PageCacheEntry->PhysicalAddress;
}

PVOID
IoGetPageCacheEntryVirtualAddress (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine gets the given page cache entry's virtual address.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

Return Value:

    Returns the virtual address of the given page cache entry.

--*/

{

    PPAGE_CACHE_ENTRY BackingEntry;
    PVOID VirtualAddress;

    //
    // If this page cache entry's virtual address is NULL, but it has a mapped
    // backing entry, then synchronize the two.
    //

    VirtualAddress = PageCacheEntry->VirtualAddress;
    BackingEntry = PageCacheEntry->BackingEntry;

    ASSERT((VirtualAddress == NULL) ||
           (BackingEntry == NULL) ||
           (VirtualAddress == BackingEntry->VirtualAddress));

    if ((VirtualAddress == NULL) && (BackingEntry != NULL)) {

        ASSERT((PageCacheEntry->Flags &
                (PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER |
                 PAGE_CACHE_ENTRY_FLAG_MAPPED)) == 0);

        ASSERT((BackingEntry->Flags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0);

        //
        // Updating the virtual address in the non-backing entry does not need
        // to be atomic because any race would set it to the same value, as
        // only the backing entry's address is ever copied in. It also does not
        // set the mapped flag because the backing entry actually owns the
        // page.
        //

        VirtualAddress = BackingEntry->VirtualAddress;
        PageCacheEntry->VirtualAddress = VirtualAddress;
    }

    return VirtualAddress;
}

BOOL
IoSetPageCacheEntryVirtualAddress (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    PVOID VirtualAddress
    )

/*++

Routine Description:

    This routine attempts to set the virtual address in the given page cache
    entry. It is assumed that the page cache entry's physical address is mapped
    at the given virtual address.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry.

    VirtualAddress - Supplies the virtual address to set in the page cache
        entry.

Return Value:

    Returns TRUE if the set succeeds or FALSE if another virtual address is
    already set for the page cache entry.

--*/

{

    ULONG OldFlags;
    BOOL Set;
    PPAGE_CACHE_ENTRY UnmappedEntry;

    ASSERT((VirtualAddress != NULL) &&
           (IS_POINTER_ALIGNED(VirtualAddress, MmPageSize()) != FALSE));

    if ((PageCacheEntry->VirtualAddress != NULL) ||
        (IoPageCacheDisableVirtualAddresses != FALSE)) {

        return FALSE;
    }

    UnmappedEntry = PageCacheEntry;
    if (UnmappedEntry->BackingEntry != NULL) {
        UnmappedEntry = UnmappedEntry->BackingEntry;
    }

    Set = FALSE;
    OldFlags = RtlAtomicOr32(&(UnmappedEntry->Flags),
                             PAGE_CACHE_ENTRY_FLAG_MAPPED);

    ASSERT((OldFlags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0);

    if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) == 0) {
        Set = TRUE;
        UnmappedEntry->VirtualAddress = VirtualAddress;
        RtlAtomicAdd(&IoPageCacheMappedPageCount, 1);
        if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
            RtlAtomicAdd(&IoPageCacheMappedDirtyPageCount, 1);

        } else {

            //
            // If it wasn't dirty, it may need to be moved from the
            // clean-unmapped list to the clean list.
            //

            IopUpdatePageCacheEntryList(UnmappedEntry, FALSE);
        }
    }

    //
    // Set the original page cache entry too if it's not the one that just took
    // the VA.
    //

    if (UnmappedEntry != PageCacheEntry) {
        VirtualAddress = UnmappedEntry->VirtualAddress;
        if (VirtualAddress != NULL) {

            //
            // Everyone racing should be trying to set the same value.
            //

            ASSERT(((PageCacheEntry->Flags &
                     PAGE_CACHE_ENTRY_FLAG_MAPPED) == 0) &&
                   ((PageCacheEntry->VirtualAddress == NULL) ||
                    (PageCacheEntry->VirtualAddress == VirtualAddress)));

            PageCacheEntry->VirtualAddress = VirtualAddress;
        }
    }

    return Set;
}
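
//
// Callers are expected to handle the race sketched above: a caller that has
// mapped the entry's physical page offers its mapping here, and if FALSE
// comes back (another mapping won the race), the caller should release its
// own mapping and fetch the winning address with
// IoGetPageCacheEntryVirtualAddress.
//
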
BOOL
IoMarkPageCacheEntryDirty (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine marks the given page cache entry as dirty.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

Return Value:

    Returns TRUE if it marked the entry dirty or FALSE if the entry was already
    dirty.

--*/

{

    PPAGE_CACHE_ENTRY BackingEntry;
    PPAGE_CACHE_ENTRY DirtyEntry;
    PFILE_OBJECT FileObject;
    BOOL MarkedDirty;

    //
    // Try to get the backing entry if possible.
    //

    DirtyEntry = PageCacheEntry;
    BackingEntry = PageCacheEntry->BackingEntry;
    if (BackingEntry != NULL) {
        DirtyEntry = BackingEntry;
    }

    //
    // Quick exit if the page cache entry is already dirty.
    //

    if ((DirtyEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
        return FALSE;
    }

    FileObject = DirtyEntry->FileObject;
    KeAcquireSharedExclusiveLockExclusive(FileObject->Lock);
    BackingEntry = DirtyEntry->BackingEntry;

    //
    // Double check the backing entry to make sure the right lock was acquired.
    //

    if (BackingEntry != NULL) {

        ASSERT(DirtyEntry == PageCacheEntry);

        KeReleaseSharedExclusiveLockExclusive(FileObject->Lock);
        DirtyEntry = BackingEntry;
        FileObject = DirtyEntry->FileObject;
        KeAcquireSharedExclusiveLockExclusive(FileObject->Lock);
    }

    MarkedDirty = IopMarkPageCacheEntryDirty(DirtyEntry);
    KeReleaseSharedExclusiveLockExclusive(FileObject->Lock);
    return MarkedDirty;
}
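
//
// Note the double-check above: a backing entry may be linked in after the
// initial unlocked read, so once the file object lock is held the backing
// pointer is re-read, and if one appeared the lock is traded for the backing
// entry's file object lock before the entry is actually marked dirty.
//
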
KSTATUS
IopInitializePageCache (
    VOID
    )

/*++

Routine Description:

    This routine initializes the page cache.

Arguments:

    None.

Return Value:

    Status code.

--*/

{

    PBLOCK_ALLOCATOR BlockAllocator;
    ULONGLONG CurrentTime;
    ULONG PageShift;
    UINTN PhysicalPages;
    KSTATUS Status;
    UINTN TotalPhysicalPages;
    UINTN TotalVirtualMemory;

    INITIALIZE_LIST_HEAD(&IoPageCacheCleanList);
    INITIALIZE_LIST_HEAD(&IoPageCacheCleanUnmappedList);
    INITIALIZE_LIST_HEAD(&IoPageCacheRemovalList);
    IoPageCacheListLock = KeCreateQueuedLock();
    if (IoPageCacheListLock == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto InitializePageCacheEnd;
    }

    //
    // Create a timer to schedule the page cache worker.
    //

    IoPageCacheWorkTimer = KeCreateTimer(PAGE_CACHE_ALLOCATION_TAG);
    if (IoPageCacheWorkTimer == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto InitializePageCacheEnd;
    }

    //
    // Create the block allocator for the page cache entry structures.
    //

    BlockAllocator = MmCreateBlockAllocator(
                                 sizeof(PAGE_CACHE_ENTRY),
                                 0,
                                 PAGE_CACHE_BLOCK_ALLOCATOR_EXPANSION_COUNT,
                                 BLOCK_ALLOCATOR_FLAG_TRIM,
                                 PAGE_CACHE_ALLOCATION_TAG);

    if (BlockAllocator == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto InitializePageCacheEnd;
    }

    IoPageCacheBlockAllocator = BlockAllocator;

    //
    // Determine an appropriate limit on the size of the page cache based on
    // the total number of physical pages.
    //

    TotalPhysicalPages = MmGetTotalPhysicalPages();
    PhysicalPages = (TotalPhysicalPages *
                     PAGE_CACHE_MEMORY_HEADROOM_PERCENT_RETREAT) /
                    100;

    if (PhysicalPages > MAX_UINTN) {
        PhysicalPages = MAX_UINTN;
    }

    IoPageCacheHeadroomPagesRetreat = PhysicalPages;

    ASSERT(IoPageCacheHeadroomPagesRetreat > 0);

    PhysicalPages = (TotalPhysicalPages *
                     PAGE_CACHE_MEMORY_HEADROOM_PERCENT_TRIGGER) /
                    100;

    if (PhysicalPages > MAX_UINTN) {
        PhysicalPages = MAX_UINTN;
    }

    IoPageCacheHeadroomPagesTrigger = PhysicalPages;

    ASSERT(IoPageCacheHeadroomPagesTrigger > 0);

    PhysicalPages = (TotalPhysicalPages *
                     PAGE_CACHE_MINIMUM_MEMORY_TARGET_PERCENT) /
                    100;

    if (PhysicalPages > MAX_UINTN) {
        PhysicalPages = MAX_UINTN;
    }

    IoPageCacheMinimumPagesTarget = PhysicalPages;
    PhysicalPages = (TotalPhysicalPages * PAGE_CACHE_MINIMUM_MEMORY_PERCENT) /
                    100;

    if (PhysicalPages > MAX_UINTN) {
        PhysicalPages = MAX_UINTN;
    }

    IoPageCacheMinimumPages = PhysicalPages;
    PhysicalPages = (TotalPhysicalPages *
                     PAGE_CACHE_LOW_MEMORY_CLEAN_PAGE_MINIMUM_PERCENTAGE) /
                    100;

    if (PhysicalPages > MAX_UINTN) {
        PhysicalPages = MAX_UINTN;
    }

    IoPageCacheLowMemoryCleanPageMinimum = PhysicalPages;

    ASSERT(IoPageCacheLowMemoryCleanPageMinimum > 0);

    if (IoPageCacheLowMemoryCleanPageMinimum >
        PAGE_CACHE_LOW_MEMORY_CLEAN_PAGE_MAXIMUM) {

        IoPageCacheLowMemoryCleanPageMinimum =
                                      PAGE_CACHE_LOW_MEMORY_CLEAN_PAGE_MAXIMUM;
    }

    //
    // Determine an appropriate limit on the amount of virtual memory the page
    // cache is allowed to consume based on the total amount of system virtual
    // memory.
    //

    PageShift = MmPageShift();
    TotalVirtualMemory = MmGetTotalVirtualMemory();
    if (TotalVirtualMemory < MAX_ULONG) {
        IoPageCacheHeadroomVirtualPagesTrigger =
                  PAGE_CACHE_SMALL_VIRTUAL_HEADROOM_TRIGGER_BYTES >> PageShift;

        IoPageCacheHeadroomVirtualPagesRetreat =
                  PAGE_CACHE_SMALL_VIRTUAL_HEADROOM_RETREAT_BYTES >> PageShift;

        IoPageCacheMaxDirtyPages = ((MAX_UINTN - (UINTN)KERNEL_VA_START + 1) /
                                    2) >> PageShift;

    } else {
        IoPageCacheHeadroomVirtualPagesTrigger =
                  PAGE_CACHE_LARGE_VIRTUAL_HEADROOM_TRIGGER_BYTES >> PageShift;

        IoPageCacheHeadroomVirtualPagesRetreat =
                  PAGE_CACHE_LARGE_VIRTUAL_HEADROOM_RETREAT_BYTES >> PageShift;
    }

    IoPageCacheCleanInterval =
                   KeConvertMicrosecondsToTimeTicks(PAGE_CACHE_CLEAN_DELAY_MIN);

    CurrentTime = HlQueryTimeCounter();
    WRITE_INT64_SYNC(&IoPageCacheLastCleanTime, CurrentTime);

    //
    // With success on the horizon, create a thread to handle the background
    // page cache entry removal and flushing work.
    //

    Status = PsCreateKernelThread(IopPageCacheThread,
                                  NULL,
                                  "IopPageCacheThread");

    if (!KSUCCESS(Status)) {
        goto InitializePageCacheEnd;
    }

    Status = STATUS_SUCCESS;

InitializePageCacheEnd:
    if (!KSUCCESS(Status)) {
        if (IoPageCacheListLock != NULL) {
            KeDestroyQueuedLock(IoPageCacheListLock);
            IoPageCacheListLock = NULL;
        }

        if (IoPageCacheWorkTimer != NULL) {
            KeDestroyTimer(IoPageCacheWorkTimer);
            IoPageCacheWorkTimer = NULL;
        }

        if (IoPageCacheBlockAllocator != NULL) {
            MmDestroyBlockAllocator(IoPageCacheBlockAllocator);
            IoPageCacheBlockAllocator = NULL;
        }
    }

    return Status;
}

PPAGE_CACHE_ENTRY
IopLookupPageCacheEntry (
    PFILE_OBJECT FileObject,
    IO_OFFSET Offset
    )

/*++

Routine Description:

    This routine searches for a page cache entry based on the file object and
    offset. If found, this routine takes a reference on the page cache entry.

Arguments:

    FileObject - Supplies a pointer to a file object for the device or file.

    Offset - Supplies an offset into the file or device.

Return Value:

    Returns a pointer to the found page cache entry on success, or NULL on
    failure.

--*/

{

    PPAGE_CACHE_ENTRY FoundEntry;

    ASSERT(KeIsSharedExclusiveLockHeld(FileObject->Lock));

    FoundEntry = IopLookupPageCacheEntryHelper(FileObject, Offset);

    //
    // If an entry was found, then update its list information.
    //

    if (FoundEntry != NULL) {
        IopUpdatePageCacheEntryList(FoundEntry, FALSE);
    }

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_LOOKUP) != 0) {
        if (FoundEntry != NULL) {
            RtlDebugPrint("PAGE CACHE: Lookup for file object (0x%08x) at "
                          "offset 0x%I64x succeeded: cache entry 0x%08x, "
                          "physical address 0x%I64x, reference count %d, "
                          "flags 0x%08x.\n",
                          FileObject,
                          Offset,
                          FoundEntry,
                          FoundEntry->PhysicalAddress,
                          FoundEntry->ReferenceCount,
                          FoundEntry->Flags);

        } else {
            RtlDebugPrint("PAGE CACHE: Lookup for file object (0x%08x) at "
  847. "offset 0x%I64x failed.\n",
  848. FileObject,
  849. Offset);
  850. }
  851. }
  852. return FoundEntry;
  853. }
  854. PPAGE_CACHE_ENTRY
  855. IopCreateOrLookupPageCacheEntry (
  856. PFILE_OBJECT FileObject,
  857. PVOID VirtualAddress,
  858. PHYSICAL_ADDRESS PhysicalAddress,
  859. IO_OFFSET Offset,
  860. PPAGE_CACHE_ENTRY LinkEntry,
  861. PBOOL EntryCreated
  862. )
  863. /*++
  864. Routine Description:
  865. This routine creates a page cache entry and inserts it into the cache. Or,
  866. if a page cache entry already exists for the supplied file object and
  867. offset, it returns the existing entry. The file object lock must be held
  868. exclusive already.
  869. Arguments:
  870. FileObject - Supplies a pointer to a file object for the device or file
  871. that owns the contents of the physical page.
  872. VirtualAddress - Supplies an optional virtual address of the page.
  873. PhysicalAddress - Supplies the physical address of the page.
  874. Offset - Supplies the offset into the file or device where the page is
  875. from.
  876. LinkEntry - Supplies an optional pointer to a page cache entry that is
  877. to share the physical address with this new page cache entry if it gets
  878. inserted.
  879. EntryCreated - Supplies an optional pointer that receives a boolean
  880. indicating whether or not a new page cache entry was created.
  881. Return Value:
  882. Returns a pointer to a page cache entry on success, or NULL on failure.
  883. --*/
  884. {
  885. BOOL Created;
  886. PPAGE_CACHE_ENTRY ExistingCacheEntry;
  887. PPAGE_CACHE_ENTRY PageCacheEntry;
  888. ASSERT(KeIsSharedExclusiveLockHeldExclusive(FileObject->Lock));
  889. ASSERT((LinkEntry == NULL) ||
  890. (LinkEntry->PhysicalAddress == PhysicalAddress));
  891. Created = FALSE;
  892. //
  893. // Allocate and initialize a new page cache entry.
  894. //
  895. PageCacheEntry = IopCreatePageCacheEntry(FileObject,
  896. VirtualAddress,
  897. PhysicalAddress,
  898. Offset);
  899. if (PageCacheEntry == NULL) {
  900. goto CreateOrLookupPageCacheEntryEnd;
  901. }
  902. //
  903. // Try to insert the entry. If someone else beat this to the punch, then
  904. // use the existing cache entry.
  905. //
  906. ExistingCacheEntry = IopLookupPageCacheEntryHelper(FileObject, Offset);
  907. if (ExistingCacheEntry == NULL) {
  908. IopInsertPageCacheEntry(PageCacheEntry, LinkEntry);
  909. Created = TRUE;
  910. }
  911. //
  912. // If an existing entry was found, then release the allocated entry.
  913. //
  914. if (Created == FALSE) {
  915. ASSERT(PageCacheEntry->ReferenceCount == 1);
  916. PageCacheEntry->ReferenceCount = 0;
  917. IopDestroyPageCacheEntry(PageCacheEntry);
  918. PageCacheEntry = ExistingCacheEntry;
  919. }
  920. //
  921. // Put the page cache entry on the appropriate list.
  922. //
  923. IopUpdatePageCacheEntryList(PageCacheEntry, Created);
  924. if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_INSERTION) != 0) {
  925. if (Created != FALSE) {
  926. RtlDebugPrint("PAGE CACHE: Inserted new entry for file object "
  927. "(0x%08x) at offset 0x%I64x: cache entry 0x%08x, "
  928. "physical address 0x%I64x, reference count %d, "
  929. "flags 0x%08x.\n",
  930. FileObject,
  931. Offset,
  932. PageCacheEntry,
  933. PageCacheEntry->PhysicalAddress,
  934. PageCacheEntry->ReferenceCount,
  935. PageCacheEntry->Flags);
  936. } else {
  937. RtlDebugPrint("PAGE CACHE: Insert found existing entry for file "
  938. "object (0x%08x) at offset 0x%I64x: cache entry "
  939. "0x%08x, physical address 0x%I64x, reference count "
  940. "%d, flags 0x%08x.\n",
  941. FileObject,
  942. Offset,
  943. PageCacheEntry,
  944. PageCacheEntry->PhysicalAddress,
  945. PageCacheEntry->ReferenceCount,
  946. PageCacheEntry->Flags);
  947. }
  948. }
  949. CreateOrLookupPageCacheEntryEnd:
  950. if (EntryCreated != NULL) {
  951. *EntryCreated = Created;
  952. }
  953. return PageCacheEntry;
  954. }
  955. PPAGE_CACHE_ENTRY
  956. IopCreateAndInsertPageCacheEntry (
  957. PFILE_OBJECT FileObject,
  958. PVOID VirtualAddress,
  959. PHYSICAL_ADDRESS PhysicalAddress,
  960. IO_OFFSET Offset,
  961. PPAGE_CACHE_ENTRY LinkEntry
  962. )
  963. /*++
  964. Routine Description:
  965. This routine creates a page cache entry and inserts it into the cache. The
  966. caller should be certain that there is not another entry in the cache for
  967. the same file object and offset and that nothing else is in contention to
  968. create the same entry.
  969. Arguments:
  970. FileObject - Supplies a pointer to a file object for the device or file
  971. that owns the contents of the physical page.
  972. VirtualAddress - Supplies an optional virtual address of the page.
  973. PhysicalAddress - Supplies the physical address of the page.
  974. Offset - Supplies the offset into the file or device where the page is
  975. from.
  976. LinkEntry - Supplies an optional pointer to a page cache entry that is to
  977. share the physical address with the new page cache entry.
  978. Return Value:
  979. Returns a pointer to a page cache entry on success, or NULL on failure.
  980. --*/
  981. {
  982. PPAGE_CACHE_ENTRY PageCacheEntry;
  983. ASSERT(KeIsSharedExclusiveLockHeldExclusive(FileObject->Lock) != FALSE);
  984. ASSERT((LinkEntry == NULL) ||
  985. (LinkEntry->PhysicalAddress == PhysicalAddress));
  986. //
  987. // Allocate and initialize a new page cache entry.
  988. //
  989. PageCacheEntry = IopCreatePageCacheEntry(FileObject,
  990. VirtualAddress,
  991. PhysicalAddress,
  992. Offset);
  993. if (PageCacheEntry == NULL) {
  994. goto CreateAndInsertPageCacheEntryEnd;
  995. }
  996. //
  997. // Insert the entry. Nothing should beat this to the punch.
  998. //
  999. ASSERT(IopLookupPageCacheEntryHelper(FileObject, Offset) == NULL);
  1000. IopInsertPageCacheEntry(PageCacheEntry, LinkEntry);

    //
    // Add the newly created page cache entry to the appropriate list.
    //
  1004. IopUpdatePageCacheEntryList(PageCacheEntry, TRUE);
  1005. if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_INSERTION) != 0) {
  1006. RtlDebugPrint("PAGE CACHE: Inserted new entry for file object "
  1007. "(0x%08x) at offset 0x%I64x: cache entry 0x%08x, "
  1008. "physical address 0x%I64x, reference count %d, "
  1009. "flags 0x%08x.\n",
  1010. FileObject,
  1011. Offset,
  1012. PageCacheEntry,
  1013. PageCacheEntry->PhysicalAddress,
  1014. PageCacheEntry->ReferenceCount,
  1015. PageCacheEntry->Flags);
  1016. }
  1017. CreateAndInsertPageCacheEntryEnd:
  1018. return PageCacheEntry;
  1019. }
KSTATUS
IopCopyAndCacheIoBuffer (
    PFILE_OBJECT FileObject,
    IO_OFFSET FileOffset,
    PIO_BUFFER Destination,
    UINTN CopySize,
    PIO_BUFFER Source,
    UINTN SourceSize,
    UINTN SourceCopyOffset,
    PUINTN BytesCopied
    )

/*++

Routine Description:

    This routine iterates over the source buffer, caching each page and
    copying the pages to the destination buffer starting at the given copy
    offsets and up to the given copy size. The file object lock must be held
    exclusive already.

Arguments:

    FileObject - Supplies a pointer to the file object for the device or file
        that owns the data.

    FileOffset - Supplies an offset into the file that corresponds to the
        beginning of the source I/O buffer.

    Destination - Supplies a pointer to the destination I/O buffer.

    CopySize - Supplies the maximum number of bytes that can be copied
        to the destination.

    Source - Supplies a pointer to the source I/O buffer.

    SourceSize - Supplies the number of bytes in the source that should be
        cached.

    SourceCopyOffset - Supplies the offset into the source buffer where the
        copy to the destination should start.

    BytesCopied - Supplies a pointer that receives the number of bytes copied
        to the destination buffer.

Return Value:

    Status code.

--*/

{
    BOOL Created;
    PIO_BUFFER_FRAGMENT Fragment;
    UINTN FragmentIndex;
    UINTN FragmentOffset;
    PPAGE_CACHE_ENTRY PageCacheEntry;
    ULONG PageSize;
    PHYSICAL_ADDRESS PhysicalAddress;
    PPAGE_CACHE_ENTRY SourceEntry;
    UINTN SourceOffset;
    KSTATUS Status;
    PVOID VirtualAddress;

    *BytesCopied = 0;
    PageSize = MmPageSize();

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(FileObject->Lock) != FALSE);
    ASSERT(IS_ALIGNED(SourceSize, PageSize) != FALSE);
    ASSERT(IS_ALIGNED(CopySize, PageSize) != FALSE);

    Fragment = Source->Fragment;
    FragmentIndex = 0;
    FragmentOffset = 0;
    SourceOffset = 0;
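
    //
    // The loop below advances one page at a time: the fragment offset tracks
    // the position within the current fragment, and the source offset tracks
    // the overall position within the source I/O buffer, moving on to the
    // next fragment whenever the current one is exhausted.
    //
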
    while (SourceSize != 0) {

        ASSERT(FragmentIndex < Source->FragmentCount);
        ASSERT(IS_ALIGNED(Fragment->Size, PageSize) != FALSE);

        //
        // If the source buffer is already backed by a page cache entry at the
        // current offset, then this new page cache entry should try to
        // reference that entry. Otherwise, it will directly own the physical
        // page.
        //

        SourceEntry = MmGetIoBufferPageCacheEntry(Source, SourceOffset);
        PhysicalAddress = Fragment->PhysicalAddress + FragmentOffset;

        ASSERT((SourceEntry == NULL) ||
               (SourceEntry->PhysicalAddress == PhysicalAddress));

        //
        // Find a virtual address for the page cache entry that is about to be
        // created. Prefer the address in the source's page cache entry, but
        // also use the source I/O buffer's virtual address if present.
        //

        VirtualAddress = NULL;
        if (SourceEntry != NULL) {
            VirtualAddress = SourceEntry->VirtualAddress;
        }

        if ((VirtualAddress == NULL) && (Fragment->VirtualAddress != NULL)) {
            VirtualAddress = Fragment->VirtualAddress + FragmentOffset;

            //
            // If there is a source page cache entry and it had no VA, it is
            // currently mapped at the determined VA. Transfer ownership to
            // the page cache entry.
            //

            if (SourceEntry != NULL) {
                IoSetPageCacheEntryVirtualAddress(SourceEntry, VirtualAddress);
            }
        }

        //
        // Try to create a page cache entry for this fragment of the source.
        //

        PageCacheEntry = IopCreateOrLookupPageCacheEntry(FileObject,
                                                         VirtualAddress,
                                                         PhysicalAddress,
                                                         FileOffset,
                                                         SourceEntry,
                                                         &Created);

        if (PageCacheEntry == NULL) {
            Status = STATUS_INSUFFICIENT_RESOURCES;
            goto CopyAndCacheIoBufferEnd;
        }

        //
        // If a cache entry was created for this physical page and the source
        // was not already backed by the page cache, then the source buffer
        // needs to take a reference on it. Otherwise the source buffer will
        // incorrectly free this physical page. Initialize the source buffer
        // at this offset with the created page cache entry.
        //

        if ((Created != FALSE) && (SourceEntry == NULL)) {
            MmSetIoBufferPageCacheEntry(Source, SourceOffset, PageCacheEntry);
        }

        //
        // If the source offset equals the copy offset, and there is more
        // to "copy", initialize the destination buffer with this page
        // cache entry.
        //

        if ((SourceOffset == SourceCopyOffset) && (CopySize != 0)) {
            MmIoBufferAppendPage(Destination,
                                 PageCacheEntry,
                                 NULL,
                                 INVALID_PHYSICAL_ADDRESS);

            SourceCopyOffset += PageSize;
            CopySize -= PageSize;
            *BytesCopied += PageSize;
        }

        //
        // Always release the reference taken by create or lookup. The I/O
        // buffer initialization routines took the necessary references.
        //

        IoPageCacheEntryReleaseReference(PageCacheEntry);
        FileOffset += PageSize;
        SourceOffset += PageSize;
        SourceSize -= PageSize;
        FragmentOffset += PageSize;

        //
        // If the end of this fragment has been reached, move to the next.
        //

        if (FragmentOffset == Fragment->Size) {
            Fragment += 1;
            FragmentIndex += 1;
            FragmentOffset = 0;
        }
    }

    Status = STATUS_SUCCESS;

CopyAndCacheIoBufferEnd:
    return Status;
}

KSTATUS
IopFlushPageCacheEntries (
    PFILE_OBJECT FileObject,
    IO_OFFSET Offset,
    ULONGLONG Size,
    ULONG Flags,
    PUINTN PageCount
    )

/*++

Routine Description:

    This routine flushes the page cache entries for the given file object
    starting at the given offset for the requested size. This routine does not
    return until all file data has successfully been written to disk. It does
    not guarantee that file meta-data has been flushed to disk.

Arguments:

    FileObject - Supplies a pointer to a file object for the device or file.

    Offset - Supplies the offset from the beginning of the file or device
        where the flush should be done.

    Size - Supplies the size, in bytes, of the region to flush. Supply a value
        of -1 to flush from the given offset to the end of the file.

    Flags - Supplies a bitmask of I/O flags. See IO_FLAG_* for definitions.

    PageCount - Supplies an optional pointer describing how many pages to
        flush. On output this value will be decreased by the number of pages
        actually flushed. Supply NULL to flush all pages in the size range.

Return Value:

    Status code.

--*/

{
    PPAGE_CACHE_ENTRY BackingEntry;
    BOOL BytesFlushed;
    PPAGE_CACHE_ENTRY CacheEntry;
    UINTN CleanStreak;
    PIO_BUFFER FlushBuffer;
    IO_OFFSET FlushNextOffset;
    UINTN FlushSize;
    LIST_ENTRY LocalList;
    PRED_BLACK_TREE_NODE Node;
    BOOL PageCacheThread;
    UINTN PagesFlushed;
    ULONG PageShift;
    ULONG PageSize;
    PAGE_CACHE_ENTRY SearchEntry;
    BOOL SkipEntry;
    KSTATUS Status;
    KSTATUS TotalStatus;

    PageCacheThread = FALSE;
    BytesFlushed = FALSE;
    CacheEntry = NULL;
    FlushBuffer = NULL;
    PagesFlushed = 0;
    PageShift = MmPageShift();
    Status = STATUS_SUCCESS;
    TotalStatus = STATUS_SUCCESS;
    INITIALIZE_LIST_HEAD(&LocalList);
    if (KeGetCurrentThread() == IoPageCacheThread) {
        PageCacheThread = TRUE;

        ASSERT(KeIsSharedExclusiveLockHeldShared(FileObject->Lock));
    }

    ASSERT((Size == -1ULL) || ((Offset + Size) > Offset));

    //
    // Optimistically mark the file object clean.
    //

    if ((Offset == 0) && (Size == -1ULL) && (PageCount == NULL)) {
        RtlAtomicAnd32(&(FileObject->Flags), ~FILE_OBJECT_FLAG_DIRTY_DATA);
    }

    if (IO_IS_FILE_OBJECT_CACHEABLE(FileObject) == FALSE) {
        goto FlushPageCacheEntriesEnd;
    }

    //
    // Quickly exit if there is nothing to flush.
    //

    if (LIST_EMPTY(&(FileObject->DirtyPageList)) != FALSE) {
        goto FlushPageCacheEntriesEnd;
    }

    //
    // Allocate a buffer to support the maximum allowed flush size.
    //

    FlushBuffer = MmAllocateUninitializedIoBuffer(PAGE_CACHE_FLUSH_MAX, 0);
    if (FlushBuffer == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto FlushPageCacheEntriesEnd;
    }

    PageSize = MmPageSize();

    ASSERT(KeIsSharedExclusiveLockHeld(FileObject->Lock));

    //
    // Determine which page cache entry the flush should start on.
    //

    SearchEntry.FileObject = FileObject;
    FlushNextOffset = Offset;
    FlushSize = 0;
    CleanStreak = 0;
    SearchEntry.Offset = Offset;
    SearchEntry.Flags = 0;
    Node = NULL;
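
    //
    // The search entry is a stack-allocated dummy used purely as a tree key:
    // the comparison routine (IopComparePageCacheEntries) orders nodes by
    // offset alone, so only the offset field needs to be meaningful.
    //
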
    //
    // Loop over page cache entries. For flush-all operations, iteration
    // grabs the first entry in the dirty list, then iterates using the
    // tree to maximize contiguous runs. Starting from the list avoids chewing
    // up CPU time scanning through the tree. For explicit flush operations of
    // a specific region, iterate using only the tree.
    //

    if ((Size != -1ULL) || (Offset != 0)) {
        Node = RtlRedBlackTreeSearchClosest(&(FileObject->PageCacheTree),
                                            &(SearchEntry.Node),
                                            TRUE);

    //
    // Move all dirty entries over to a local list to avoid processing them
    // many times over.
    //

    } else {
        KeAcquireQueuedLock(IoPageCacheListLock);
        if (!LIST_EMPTY(&(FileObject->DirtyPageList))) {
            MOVE_LIST(&(FileObject->DirtyPageList), &LocalList);
            INITIALIZE_LIST_HEAD(&(FileObject->DirtyPageList));
        }

        KeReleaseQueuedLock(IoPageCacheListLock);
    }
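
    //
    // Each pass below batches a run of offset-contiguous entries into the
    // flush buffer and writes the batch out with IopFlushPageCacheBuffer
    // whenever the run breaks or the buffer fills.
    //
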
    while (TRUE) {

        //
        // Traverse along the tree if there's already a node.
        //

        if (Node != NULL) {
            Node = RtlRedBlackTreeGetNextNode(&(FileObject->PageCacheTree),
                                              FALSE,
                                              Node);
        }

        if ((Node == NULL) && (Size == -1ULL) && (Offset == 0)) {
            KeAcquireQueuedLock(IoPageCacheListLock);
            if (!LIST_EMPTY(&LocalList)) {
                CacheEntry = LIST_VALUE(LocalList.Next,
                                        PAGE_CACHE_ENTRY,
                                        ListEntry);

                Node = &(CacheEntry->Node);

                //
                // The node might have been pulled from the tree while the
                // file object lock was dropped, but that routine didn't yet
                // get far enough to pull it off the list. Do it for them.
                // Drop the list lock before starting the next iteration.
                //

                if (Node->Parent == NULL) {
                    LIST_REMOVE(&(CacheEntry->ListEntry));
                    CacheEntry->ListEntry.Next = NULL;
                    Node = NULL;
                    KeReleaseQueuedLock(IoPageCacheListLock);
                    continue;
                }
            }

            KeReleaseQueuedLock(IoPageCacheListLock);
        }

        //
        // Stop if there's nothing left.
        //

        if (Node == NULL) {
            break;
        }

        CacheEntry = RED_BLACK_TREE_VALUE(Node, PAGE_CACHE_ENTRY, Node);
        if ((Size != -1ULL) && (CacheEntry->Offset >= (Offset + Size))) {
            break;
        }

        //
        // Determine if the current entry can be skipped.
        //

        SkipEntry = FALSE;
        BackingEntry = CacheEntry->BackingEntry;

        ASSERT(CacheEntry->FileObject == FileObject);

        //
        // If the entry is clean, then it can probably be skipped.
        //

        if ((CacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) {
            SkipEntry = TRUE;

            //
            // If this is a synchronized flush and the backing entry is
            // dirty, then write it out.
            //

            if (((Flags & IO_FLAG_DATA_SYNCHRONIZED) != 0) &&
                (BackingEntry != NULL) &&
                ((BackingEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0)) {

                SkipEntry = FALSE;
            }

            //
            // A certain number of clean pages will be tolerated to batch up
            // writes.
            //

            if ((FlushSize != 0) &&
                (CacheEntry->Offset == FlushNextOffset) &&
                (CleanStreak < PAGE_CACHE_FLUSH_MAX_CLEAN_STREAK)) {

                CleanStreak += 1;
                SkipEntry = FALSE;
            }

        //
        // If the entry is not within the bounds of the provided offset and
        // size then it can be skipped.
        //

        } else {
            if ((CacheEntry->Offset + PageSize) <= Offset) {
                SkipEntry = TRUE;

            } else if ((Size != -1ULL) &&
                       (CacheEntry->Offset >= (Offset + Size))) {

                SkipEntry = TRUE;
            }

            //
            // If it's dirty and it counts, then reset any clean streaks.
            //

            if (SkipEntry == FALSE) {
                CleanStreak = 0;
            }
        }

        //
        // Potentially move to the next set of entries.
        //

        if (SkipEntry != FALSE) {
            if ((Size == -1ULL) && (Offset == 0)) {
                Node = NULL;
            }

            continue;
        }

        PagesFlushed += 1;

        //
        // Add the cache entry to the flush buffer if necessary, potentially
        // looping again to try to add more pages.
        //

        if ((FlushSize == 0) || (CacheEntry->Offset == FlushNextOffset)) {
            MmIoBufferAppendPage(FlushBuffer,
                                 CacheEntry,
                                 NULL,
                                 INVALID_PHYSICAL_ADDRESS);

            FlushSize += PageSize;
            FlushNextOffset = CacheEntry->Offset + PageSize;
            if (FlushSize < PAGE_CACHE_FLUSH_MAX) {
                continue;
            }

            //
            // Clear out the cache entry to indicate it's been handled.
            //

            CacheEntry = NULL;
        }

        ASSERT(FlushSize != 0);

        //
        // No need to flush any trailing clean entries on the end. The clean
        // streak is counted in pages, so shift it up to bytes before
        // subtracting it from the flush size.
        //

        ASSERT(FlushSize > (CleanStreak << PageShift));

        FlushSize -= CleanStreak << PageShift;

        //
        // Flush the buffer. Note that for block devices this does drop and
        // reacquire the file object lock.
        //

        Status = IopFlushPageCacheBuffer(FlushBuffer, FlushSize, Flags);
        if (!KSUCCESS(Status)) {
            TotalStatus = Status;

        } else {
            BytesFlushed = TRUE;
        }

        //
        // Prepare the flush buffer to be used again.
        //

        MmResetIoBuffer(FlushBuffer);
        FlushSize = 0;
        CleanStreak = 0;

        //
        // Stop if enough pages were flushed.
        //

        if ((PageCount != NULL) && (PagesFlushed >= *PageCount)) {
            break;
        }

        //
        // If this cache entry has not been dealt with, add it to the buffer
        // now.
        //

        if (CacheEntry != NULL) {
            MmIoBufferAppendPage(FlushBuffer,
                                 CacheEntry,
                                 NULL,
                                 INVALID_PHYSICAL_ADDRESS);

            FlushSize = PageSize;
            FlushNextOffset = CacheEntry->Offset + PageSize;

        //
        // Reset the iteration for a completely blank slate.
        //

        } else {
            if ((Size == -1ULL) && (Offset == 0)) {
                Node = NULL;
            }
        }

        //
        // The lock may have been dropped during the flush, so the node might
        // be ripped out of the tree. Handle that if it was.
        //

        if ((Node != NULL) && (Node->Parent == NULL)) {
            if ((Offset == 0) && (Size == -1ULL)) {
                Node = NULL;

            } else {
                CacheEntry = RED_BLACK_TREE_VALUE(Node,
                                                  PAGE_CACHE_ENTRY,
                                                  Node);

                Node = RtlRedBlackTreeSearchClosest(
                                                 &(FileObject->PageCacheTree),
                                                 &(CacheEntry->Node),
                                                 TRUE);
            }
        }

        //
        // If this is an attempt to flush the entire cache, check on the
        // memory warning level; it may be necessary to stop the flush and
        // evict some entries. Only do this if the minimum number of pages
        // have been cleaned.
        //

        if (PageCacheThread != FALSE) {
            if ((IopIsPageCacheTooBig(NULL) != FALSE) &&
                ((IoPageCachePhysicalPageCount -
                  IoPageCacheDirtyPageCount) >
                 IoPageCacheLowMemoryCleanPageMinimum)) {

                Status = STATUS_TRY_AGAIN;
                goto FlushPageCacheEntriesEnd;
            }
        }
    }

    //
    // If the loop completed and there was something left to flush, do it now.
    //

    ASSERT(FlushSize >= (CleanStreak << PageShift));

    FlushSize -= CleanStreak << PageShift;
    if (FlushSize != 0) {
        Status = IopFlushPageCacheBuffer(FlushBuffer, FlushSize, Flags);
        if (!KSUCCESS(Status)) {
            TotalStatus = Status;

        } else {
            BytesFlushed = TRUE;
        }
    }

    //
    // Validate the dirty lists if the debug flag is set. This is very slow,
    // and should only be turned on if actively debugging missing dirty page
    // cache pages.
    //

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_DIRTY_LISTS) != 0) {
        IopCheckFileObjectPageCache(FileObject);
    }

    Status = STATUS_SUCCESS;

FlushPageCacheEntriesEnd:

    //
    // If there are still entries on the local list, put those back on the
    // dirty list.
    //

    if (!LIST_EMPTY(&LocalList)) {
        KeAcquireQueuedLock(IoPageCacheListLock);
        APPEND_LIST(&LocalList, &(FileObject->DirtyPageList));
        KeReleaseQueuedLock(IoPageCacheListLock);
    }

    if ((!KSUCCESS(Status)) && (KSUCCESS(TotalStatus))) {
        TotalStatus = Status;
    }

    //
    // If writing to a disk and the synchronized flag is not set, then a sync
    // operation will need to be performed on this disk.
    //

    if ((BytesFlushed != FALSE) &&
        (FileObject->Properties.Type == IoObjectBlockDevice) &&
        ((Flags & IO_FLAG_DATA_SYNCHRONIZED) == 0)) {

        Status = IopSynchronizeBlockDevice(FileObject->Device);
        if (!KSUCCESS(Status)) {
            TotalStatus = Status;
        }
    }

    if (FlushBuffer != NULL) {
        MmFreeIoBuffer(FlushBuffer);
    }

    if (PageCount != NULL) {
        if (PagesFlushed > *PageCount) {
            *PageCount = 0;

        } else {
            *PageCount -= PagesFlushed;
        }
    }

    //
    // Mark the file object as dirty if something went wrong.
    //

    if (!KSUCCESS(TotalStatus)) {
        IopMarkFileObjectDirty(FileObject);
    }

    return TotalStatus;
}

VOID
IopEvictPageCacheEntries (
    PFILE_OBJECT FileObject,
    IO_OFFSET Offset,
    ULONG Flags
    )

/*++

Routine Description:

    This routine attempts to evict the page cache entries for a given file or
    device, as specified by the file object. The flags specify how aggressive
    this routine should be. The file object lock must already be held
    exclusive.

Arguments:

    FileObject - Supplies a pointer to a file object for the device or file.

    Offset - Supplies the starting offset into the file or device after which
        all page cache entries should be evicted.

    Flags - Supplies a bitmask of eviction flags. See
        PAGE_CACHE_EVICTION_FLAG_* for definitions.

Return Value:

    None.

--*/

{
    PPAGE_CACHE_ENTRY CacheEntry;
    LIST_ENTRY DestroyListHead;
    PRED_BLACK_TREE_NODE Node;
    PAGE_CACHE_ENTRY SearchEntry;

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(FileObject->Lock) != FALSE);

    if (IO_IS_FILE_OBJECT_CACHEABLE(FileObject) == FALSE) {
        return;
    }

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_EVICTION) != 0) {
        RtlDebugPrint("PAGE CACHE: Evicting entries for file object "
                      "(0x%08x): type %d, reference count %d, path count "
                      "%d, offset 0x%I64x.\n",
                      FileObject,
                      FileObject->Properties.Type,
                      FileObject->ReferenceCount,
                      FileObject->PathEntryCount,
                      Offset);
    }

    //
    // The tree is being modified, so the file object lock must be held
    // exclusively.
    //

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(FileObject->Lock));

    //
    // Quickly exit if there is nothing to evict.
    //

    if (RED_BLACK_TREE_EMPTY(&(FileObject->PageCacheTree)) != FALSE) {
        return;
    }

    //
    // Iterate over the file object's tree of page cache entries.
    //

    INITIALIZE_LIST_HEAD(&DestroyListHead);

    //
    // Find the page cache entry in the file object's tree that is closest
    // (but greater than or equal) to the given eviction offset.
    //

    SearchEntry.FileObject = FileObject;
    SearchEntry.Offset = Offset;
    SearchEntry.Flags = 0;
    Node = RtlRedBlackTreeSearchClosest(&(FileObject->PageCacheTree),
                                        &(SearchEntry.Node),
                                        TRUE);

    while (Node != NULL) {
        CacheEntry = RED_BLACK_TREE_VALUE(Node, PAGE_CACHE_ENTRY, Node);
        Node = RtlRedBlackTreeGetNextNode(&(FileObject->PageCacheTree),
                                          FALSE,
                                          Node);

        //
        // Assert this is a cache entry after the eviction offset.
        //

        ASSERT(CacheEntry->Offset >= Offset);

        //
        // If no flags were provided to indicate more forceful behavior, just
        // do a best effort. If there is a reference on the cache entry that
        // is not from the flush worker (i.e. the busy flag), then skip it.
        //

        if ((Flags == 0) && (CacheEntry->ReferenceCount != 0)) {
            if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_EVICTION) != 0) {
                RtlDebugPrint("PAGE CACHE: Skip evicting entry 0x%08x: file "
                              "object 0x%08x, offset 0x%I64x, physical "
                              "address 0x%I64x, reference count %d, flags "
                              "0x%08x.\n",
                              CacheEntry,
                              CacheEntry->FileObject,
                              CacheEntry->Offset,
                              CacheEntry->PhysicalAddress,
                              CacheEntry->ReferenceCount,
                              CacheEntry->Flags);
            }

            continue;
        }

        //
        // Clean the page to keep the statistics accurate. It's been evicted
        // and will not be written out. Don't move it to the clean list, as
        // this routine will place it on either the evicted list or the local
        // destroy list.
        //

        IopMarkPageCacheEntryClean(CacheEntry, FALSE);

        //
        // If this is a delete operation, then there should not be any open
        // handles for this file object. Therefore there should be no I/O
        // buffers with references to this file object's page cache entries.
        // Truncate is different, as there may be outstanding handles.
        //

        ASSERT(((Flags & PAGE_CACHE_EVICTION_FLAG_DELETE) == 0) ||
               (CacheEntry->ReferenceCount == 0));

        //
        // Remove the node from the page cache tree. It should not be found
        // on look-up again.
        //

        ASSERT(CacheEntry->Node.Parent != NULL);

        IopRemovePageCacheEntryFromTree(CacheEntry);

        //
        // Remove the cache entry from its current list. If there are no
        // references, it can be destroyed now. Otherwise, stick it on the
        // list to be destroyed later.
        //

        KeAcquireQueuedLock(IoPageCacheListLock);

        ASSERT((CacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);

        if (CacheEntry->ListEntry.Next != NULL) {
            LIST_REMOVE(&(CacheEntry->ListEntry));
        }

        if (CacheEntry->ReferenceCount == 0) {
            INSERT_BEFORE(&(CacheEntry->ListEntry), &DestroyListHead);

        } else {
            INSERT_BEFORE(&(CacheEntry->ListEntry), &IoPageCacheRemovalList);
        }

        KeReleaseQueuedLock(IoPageCacheListLock);
    }

    //
    // With the evicted page cache entries removed from the cache, loop
    // through and destroy them. This gets called by truncate and device
    // removal, so releasing the last file object reference and generating
    // additional I/O here should be okay (this should not be in a recursive
    // I/O path).
    //

    IopDestroyPageCacheEntries(&DestroyListHead);
    return;
}

BOOL
IopIsIoBufferPageCacheBacked (
    PFILE_OBJECT FileObject,
    PIO_BUFFER IoBuffer,
    IO_OFFSET Offset,
    UINTN SizeInBytes
    )

/*++

Routine Description:

    This routine determines whether or not the given I/O buffer with data
    targeting the given file object at the given offset is currently backed by
    the page cache, up to the given size. The caller is expected to
    synchronize with eviction via truncate.

Arguments:

    FileObject - Supplies a pointer to a file object.

    IoBuffer - Supplies a pointer to an I/O buffer.

    Offset - Supplies an offset into the file or device object.

    SizeInBytes - Supplies the number of bytes in the I/O buffer that should
        be cache backed.

Return Value:

    Returns TRUE if the I/O buffer is backed by valid page cache entries, or
    FALSE otherwise.

--*/

{
    BOOL Backed;
    ULONG PageSize;

    ASSERT(IoBuffer->FragmentCount != 0);

    //
    // It is assumed that if the first page of the I/O buffer is backed by
    // the page cache then all pages are backed by the page cache.
    //

    PageSize = MmPageSize();
    Backed = IopIsIoBufferPageCacheBackedHelper(FileObject,
                                                IoBuffer,
                                                Offset,
                                                PageSize);

    //
    // Assert that the assumption above is correct.
    //

    ASSERT((Backed == FALSE) ||
           (IopIsIoBufferPageCacheBackedHelper(FileObject,
                                               IoBuffer,
                                               Offset,
                                               ALIGN_RANGE_UP(SizeInBytes,
                                                              PageSize))));

    return Backed;
}

VOID
IopSchedulePageCacheThread (
    VOID
    )

/*++

Routine Description:

    This routine schedules a cleaning of the page cache for some time in the
    future.

Arguments:

    None.

Return Value:

    None.

--*/

{
    PAGE_CACHE_STATE OldState;
    KSTATUS Status;

    //
    // Do a quick exit check without the atomic first.
    //

    if (IoPageCacheState == PageCacheStateDirty) {
        return;
    }
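
    //
    // The unsynchronized read above is only an optimization: if it races and
    // reads a stale value, the atomic compare-exchange below still
    // guarantees that exactly one thread wins the clean-to-dirty transition
    // and queues the timer.
    //
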
    //
    // Try to take the state from clean to dirty. If this thread won, then
    // queue the timer.
    //

    OldState = RtlAtomicCompareExchange32(&IoPageCacheState,
                                          PageCacheStateDirty,
                                          PageCacheStateClean);

    if (OldState == PageCacheStateClean) {

        ASSERT(IoPageCacheCleanInterval != 0);

        Status = KeQueueTimer(IoPageCacheWorkTimer,
                              TimerQueueSoftWake,
                              0,
                              IoPageCacheCleanInterval,
                              0,
                              NULL);

        ASSERT(KSUCCESS(Status));
    }

    return;
}

IO_OFFSET
IopGetPageCacheEntryOffset (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine gets the file or device offset of the given page cache entry.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

Return Value:

    Returns the file or device offset of the given page cache entry.

--*/

{
    return PageCacheEntry->Offset;
}

BOOL
IopMarkPageCacheEntryClean (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    BOOL MoveToCleanList
    )

/*++

Routine Description:

    This routine marks the given page cache entry as clean.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

    MoveToCleanList - Supplies a boolean indicating if the page cache entry
        should be moved to the list of clean page cache entries.

Return Value:

    Returns TRUE if it marked the entry clean or FALSE if the entry was
    already clean.

--*/

{
    BOOL MarkedClean;
    ULONG OldFlags;

    //
    // Quick exit check before banging around atomically.
    //

    if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) {
        return FALSE;
    }

    //
    // Marking a page cache entry clean requires having a reference on the
    // entry or holding the tree lock.
    //

    ASSERT((PageCacheEntry->ReferenceCount != 0) ||
           (KeIsSharedExclusiveLockHeld(PageCacheEntry->FileObject->Lock)));
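
    //
    // Clear the dirty flag atomically. The atomic AND returns the previous
    // flag value, so if multiple threads race to clean the same entry, only
    // the one that actually observed the dirty bit set adjusts the counters
    // and lists below.
    //
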
    OldFlags = RtlAtomicAnd32(&(PageCacheEntry->Flags),
                              ~PAGE_CACHE_ENTRY_FLAG_DIRTY);

    //
    // Return that this routine marked the page clean based on the old value.
    // Additionally, decrement the dirty page count if this entry owns the
    // page.
    //

    if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {

        ASSERT((OldFlags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0);

        RtlAtomicAdd(&IoPageCacheDirtyPageCount, (UINTN)-1);
        if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
            RtlAtomicAdd(&IoPageCacheMappedDirtyPageCount, (UINTN)-1);
        }

        MarkedClean = TRUE;

        //
        // Remove the entry from the dirty list.
        //

        KeAcquireQueuedLock(IoPageCacheListLock);

        ASSERT((PageCacheEntry->ListEntry.Next != NULL) &&
               ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0));

        LIST_REMOVE(&(PageCacheEntry->ListEntry));
        PageCacheEntry->ListEntry.Next = NULL;

        //
        // If requested, move the page cache entry to the back of the LRU
        // list; assume that this page has been fairly recently used on
        // account of it having been dirty. If the page is already on a list,
        // then leave it at its current location.
        //

        if (MoveToCleanList != FALSE) {
            INSERT_BEFORE(&(PageCacheEntry->ListEntry), &IoPageCacheCleanList);
        }

        KeReleaseQueuedLock(IoPageCacheListLock);

    } else {
        MarkedClean = FALSE;
    }

    return MarkedClean;
}

BOOL
IopMarkPageCacheEntryDirty (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine marks the given page cache entry as dirty. The file object
    lock must already be held.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

Return Value:

    Returns TRUE if it marked the entry dirty or FALSE if the entry was
    already dirty.

--*/

{
    PPAGE_CACHE_ENTRY DirtyEntry;
    PFILE_OBJECT FileObject;
    BOOL MarkedDirty;
    ULONG OldFlags;

    //
    // If this page cache entry does not own the physical page then directly
    // mark the backing entry dirty. This causes the system to skip the flush
    // at this page cache entry's layer.
    //

    if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) == 0) {

        ASSERT((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);

        DirtyEntry = PageCacheEntry->BackingEntry;

    } else {
        DirtyEntry = PageCacheEntry;
    }

    //
    // Quick exit check before banging around atomically.
    //

    if ((DirtyEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
        return FALSE;
    }

    FileObject = DirtyEntry->FileObject;
    if (DirtyEntry != PageCacheEntry) {
        KeAcquireSharedExclusiveLockExclusive(FileObject->Lock);
    }
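
    //
    // When the dirty bit lands on the backing entry, that entry belongs to a
    // different file object (the underlying block device), so its lock must
    // be acquired here; the caller only holds the lock for the original
    // entry's file object.
    //
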
    OldFlags = RtlAtomicOr32(&(DirtyEntry->Flags), PAGE_CACHE_ENTRY_FLAG_DIRTY);

    ASSERT((OldFlags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0);

    if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) {

        ASSERT((DirtyEntry->VirtualAddress == PageCacheEntry->VirtualAddress) ||
               (PageCacheEntry->VirtualAddress == NULL));

        RtlAtomicAdd(&IoPageCacheDirtyPageCount, 1);
        if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
            RtlAtomicAdd(&IoPageCacheMappedDirtyPageCount, 1);
        }

        MarkedDirty = TRUE;

        //
        // Remove the page cache entry from the clean LRU if it's on one.
        //

        KeAcquireQueuedLock(IoPageCacheListLock);
        if (DirtyEntry->ListEntry.Next != NULL) {
            LIST_REMOVE(&(DirtyEntry->ListEntry));
        }

        //
        // Add it to the dirty page list of the file object.
        //

        INSERT_BEFORE(&(DirtyEntry->ListEntry),
                      &(FileObject->DirtyPageList));

        KeReleaseQueuedLock(IoPageCacheListLock);
        IopMarkFileObjectDirty(DirtyEntry->FileObject);

    } else {
        MarkedDirty = FALSE;
    }

    if (DirtyEntry != PageCacheEntry) {
        KeReleaseSharedExclusiveLockExclusive(FileObject->Lock);
    }

    return MarkedDirty;
}

KSTATUS
IopCopyIoBufferToPageCacheEntry (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    ULONG PageOffset,
    PIO_BUFFER SourceBuffer,
    UINTN SourceOffset,
    ULONG ByteCount
    )

/*++

Routine Description:

    This routine copies up to a page from the given source buffer to the given
    page cache entry.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

    PageOffset - Supplies an offset into the page where the copy should begin.

    SourceBuffer - Supplies a pointer to the source buffer where the data
        originates.

    SourceOffset - Supplies an offset into the source buffer where the data
        copy should begin.

    ByteCount - Supplies the number of bytes to copy.

Return Value:

    Status code.

--*/

{
    IO_BUFFER PageCacheBuffer;
    KSTATUS Status;

    //
    // Initialize the I/O buffer with the page cache entry. This takes an
    // additional reference on the page cache entry.
    //

    Status = MmInitializeIoBuffer(&PageCacheBuffer,
                                  NULL,
                                  INVALID_PHYSICAL_ADDRESS,
                                  0,
                                  IO_BUFFER_FLAG_KERNEL_MODE_DATA);

    if (!KSUCCESS(Status)) {
        goto CopyIoBufferToPageCacheEntryEnd;
    }

    MmIoBufferAppendPage(&PageCacheBuffer,
                         PageCacheEntry,
                         NULL,
                         INVALID_PHYSICAL_ADDRESS);

    //
    // Copy the contents of the source to the page cache entry.
    //

    Status = MmCopyIoBuffer(&PageCacheBuffer,
                            PageOffset,
                            SourceBuffer,
                            SourceOffset,
                            ByteCount);

    if (!KSUCCESS(Status)) {
        goto CopyIoBufferToPageCacheEntryEnd;
    }

    IopMarkPageCacheEntryDirty(PageCacheEntry);

CopyIoBufferToPageCacheEntryEnd:
    MmFreeIoBuffer(&PageCacheBuffer);
    return Status;
}

BOOL
IopCanLinkPageCacheEntry (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    PFILE_OBJECT FileObject
    )

/*++

Routine Description:

    This routine determines if the given page cache entry could link with a
    page cache entry for the given file object.

Arguments:

    PageCacheEntry - Supplies a pointer to a page cache entry.

    FileObject - Supplies a pointer to a file object.

Return Value:

    Returns TRUE if the page cache entry could be linked to a page cache entry
    with the given file object or FALSE otherwise.

--*/

{
    IO_OBJECT_TYPE PageCacheType;

    ASSERT(IO_IS_FILE_OBJECT_CACHEABLE(FileObject) != FALSE);

    PageCacheType = PageCacheEntry->FileObject->Properties.Type;
    if (IS_IO_OBJECT_TYPE_LINKABLE(PageCacheType) == FALSE) {
        return FALSE;
    }

    if (FileObject->Properties.Type == PageCacheType) {
        return FALSE;
    }

    return TRUE;
}

BOOL
IopLinkPageCacheEntries (
    PPAGE_CACHE_ENTRY LowerEntry,
    PPAGE_CACHE_ENTRY UpperEntry
    )

/*++

Routine Description:

    This routine attempts to link the given page cache entries so that they
    begin sharing a physical page: the one currently owned by the upper
    entry.

Arguments:

    LowerEntry - Supplies a pointer to the lower (disk) level page cache entry
        whose physical address is to be modified. The caller should ensure
        that its reference on this entry does not come from an I/O buffer or
        else the physical address in the I/O buffer would be invalid. The
        file object lock for this entry must already be held exclusive.

    UpperEntry - Supplies a pointer to the upper (file) page cache entry
        that currently owns the physical page to be shared.

Return Value:

    Returns TRUE if the two page cache entries are already connected or if the
    routine is successful. It returns FALSE otherwise and both page cache
    entries should continue to use their own physical pages.

--*/

{
    ULONG ClearFlags;
    ULONG Delta;
    IO_OBJECT_TYPE LowerType;
    ULONG OldFlags;
    ULONG PageSize;
    PHYSICAL_ADDRESS PhysicalAddress;
    BOOL Result;
    KSTATUS Status;
    IO_OBJECT_TYPE UpperType;
    PVOID VirtualAddress;

    //
    // The lower file object lock must be held exclusively so that no more
    // references can be taken on the page cache entries.
    //

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(LowerEntry->FileObject->Lock));
    ASSERT(LowerEntry->ReferenceCount > 0);
    ASSERT(UpperEntry->ReferenceCount > 0);

    LowerType = LowerEntry->FileObject->Properties.Type;
    UpperType = UpperEntry->FileObject->Properties.Type;

    //
    // Page cache entries with the same I/O type are not allowed to be linked.
    //

    if (LowerType == UpperType) {
        return FALSE;
    }

    //
    // If the two entries are already linked, do nothing.
    //

    if ((LowerType == IoObjectBlockDevice) &&
        (IO_IS_CACHEABLE_FILE(UpperType))) {

        if (UpperEntry->BackingEntry == LowerEntry) {
            return TRUE;
        }

    } else {

        ASSERT(FALSE);

        return FALSE;
    }

    //
    // If the page cache entry that is to be updated has more than one
    // reference then this cannot proceed.
    //

    if (LowerEntry->ReferenceCount != 1) {
        return FALSE;
    }

    VirtualAddress = NULL;
    PhysicalAddress = INVALID_PHYSICAL_ADDRESS;

    //
    // Both entries should be page owners.
    //

    ASSERT((LowerEntry->Flags & UpperEntry->Flags &
            PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0);

    //
    // Make sure no one has the disk mmapped, since its physical page is
    // about to be destroyed.
    //

    Status = IopUnmapPageCacheEntrySections(LowerEntry, NULL);
    if (!KSUCCESS(Status)) {
        Result = FALSE;
        goto LinkPageCacheEntriesEnd;
    }

    //
    // The upper entry had better not be dirty, because the accounting
    // numbers would be off otherwise, and it would result in a dirty non
    // page owner.
    //

    ASSERT((UpperEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);

    //
    // If the flags differ in mappedness, clear the old mapped flag.
    //

    Delta = LowerEntry->Flags ^ UpperEntry->Flags;
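
    //
    // The exclusive OR above leaves a bit set for every flag on which the
    // two entries disagree, so testing the delta against the lower entry's
    // mapped bit isolates the case where only the lower entry is mapped.
    //
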
    if ((Delta & LowerEntry->Flags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
        OldFlags = RtlAtomicAnd32(&(LowerEntry->Flags),
                                  ~PAGE_CACHE_ENTRY_FLAG_MAPPED);

        if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
            RtlAtomicAdd(&IoPageCacheMappedPageCount, (UINTN)-1);
            if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
                RtlAtomicAdd(&IoPageCacheMappedDirtyPageCount, (UINTN)-1);
            }
        }
    }

    //
    // Save the address of the physical page that is to be released and
    // update the entries to share the link entry's page.
    //

    PhysicalAddress = LowerEntry->PhysicalAddress;
    VirtualAddress = LowerEntry->VirtualAddress;
    LowerEntry->PhysicalAddress = UpperEntry->PhysicalAddress;
    LowerEntry->VirtualAddress = UpperEntry->VirtualAddress;

    //
    // Clear the mapped flag here because the backing entry owns the mapped
    // page count for this page.
    //

    ClearFlags = PAGE_CACHE_ENTRY_FLAG_MAPPED |
                 PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER;

    OldFlags = RtlAtomicAnd32(&(UpperEntry->Flags), ~ClearFlags);
    if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
        RtlAtomicAdd(&IoPageCacheMappedPageCount, (UINTN)-1);

        //
        // Transfer the mapped flag over to the lower entry.
        //

        if ((Delta & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
            OldFlags = RtlAtomicOr32(&(LowerEntry->Flags),
                                     PAGE_CACHE_ENTRY_FLAG_MAPPED);

            if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) == 0) {
                RtlAtomicAdd(&IoPageCacheMappedPageCount, 1);
                if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
                    RtlAtomicAdd(&IoPageCacheMappedDirtyPageCount, 1);
                }

                //
                // The entry was just used, and may need to come off the
                // clean unmapped list.
                //

                IopUpdatePageCacheEntryList(LowerEntry, FALSE);
            }
        }
    }

    IopUpdatePageCacheEntryList(UpperEntry, FALSE);

    //
    // Now link the two entries based on their types. Note that nothing
    // should have been able to sneak in and link them since the caller has a
    // reference on both entries.
    //

    ASSERT(UpperEntry->BackingEntry == NULL);

    IoPageCacheEntryAddReference(LowerEntry);
    UpperEntry->BackingEntry = LowerEntry;
    Result = TRUE;

LinkPageCacheEntriesEnd:

    //
    // If the physical page removed from the entry was mapped, unmap it.
    //

    if (VirtualAddress != NULL) {
        PageSize = MmPageSize();
        MmUnmapAddress(VirtualAddress, PageSize);
    }

    //
    // If a physical page was removed from the entry, free it.
    //

    if (PhysicalAddress != INVALID_PHYSICAL_ADDRESS) {
        MmFreePhysicalPage(PhysicalAddress);
        RtlAtomicAdd(&IoPageCachePhysicalPageCount, (UINTN)-1);
    }

    return Result;
}

VOID
IopTrimPageCache (
    BOOL TimidEffort
    )

/*++

Routine Description:

    This routine removes as many clean page cache entries as is necessary to
    bring the size of the page cache back down to a reasonable level. It
    evicts the page cache entries in LRU order.

Arguments:

    TimidEffort - Supplies a boolean indicating whether or not this function
        should only try once to acquire a file object lock before moving on.
        Set this to TRUE if this thread might already be holding file object
        locks.

Return Value:

    None.

--*/

{
    LIST_ENTRY DestroyListHead;
    UINTN FreePageTarget;
    UINTN FreePhysicalPages;
    UINTN PageOutCount;
    UINTN TargetRemoveCount;

    TargetRemoveCount = 0;
    FreePhysicalPages = -1;
    if (IopIsPageCacheTooBig(&FreePhysicalPages) == FALSE) {
        goto TrimPageCacheEnd;
    }

    //
    // The page cache is not leaving enough free physical pages; determine
    // how many entries must be evicted.
    //

    ASSERT(FreePhysicalPages < IoPageCacheHeadroomPagesRetreat);

    TargetRemoveCount = IoPageCacheHeadroomPagesRetreat -
                        FreePhysicalPages;

    if (TargetRemoveCount > IoPageCachePhysicalPageCount) {
        TargetRemoveCount = IoPageCachePhysicalPageCount;
    }

    if (IoPageCachePhysicalPageCount - TargetRemoveCount <
        IoPageCacheMinimumPages) {

        TargetRemoveCount = IoPageCachePhysicalPageCount -
                            IoPageCacheMinimumPages;
    }
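
    //
    // To illustrate the arithmetic with made-up numbers: if the retreat
    // headroom were 1024 pages and only 256 pages were free, the target
    // would be 768 evictions, clamped so the cache is never asked to give
    // up more pages than it holds, nor to shrink below its minimum size.
    //
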
    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_SIZE_MANAGEMENT) != 0) {
        RtlDebugPrint("PAGE CACHE: Attempt to remove at least %lu entries.\n",
                      TargetRemoveCount);
    }

    //
    // Iterate over the clean LRU page cache list trying to find which page
    // cache entries can be removed. Stop as soon as the target count has
    // been reached.
    //

    INITIALIZE_LIST_HEAD(&DestroyListHead);
    if (!LIST_EMPTY(&IoPageCacheCleanUnmappedList)) {
        IopRemovePageCacheEntriesFromList(&IoPageCacheCleanUnmappedList,
                                          &DestroyListHead,
                                          TimidEffort,
                                          &TargetRemoveCount);
    }

    if (TargetRemoveCount != 0) {
        IopRemovePageCacheEntriesFromList(&IoPageCacheCleanList,
                                          &DestroyListHead,
                                          TimidEffort,
                                          &TargetRemoveCount);
    }

    //
    // Destroy the evicted page cache entries. This will reduce the page
    // cache's physical page count for any page that it ends up releasing.
    //

    IopDestroyPageCacheEntries(&DestroyListHead);

TrimPageCacheEnd:

    //
    // Also unmap things if the remaining page cache is causing too much
    // virtual memory pressure.
    //

    IopTrimPageCacheVirtual(TimidEffort);

    //
    // If the page cache is smaller than its target, ask MM to page out some
    // things so the page cache can grow back up to its target. This throws
    // pageable data into the mix, so if a process allocates a boatload of
    // memory, the page cache doesn't shrink to a dot and constantly lose the
    // working set of the process.
    //

    if ((TargetRemoveCount != 0) &&
        (IoPageCachePhysicalPageCount < IoPageCacheMinimumPagesTarget)) {

        PageOutCount = IoPageCacheMinimumPagesTarget -
                       IoPageCachePhysicalPageCount;

        FreePageTarget = FreePhysicalPages + PageOutCount;
        if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_SIZE_MANAGEMENT) != 0) {
            RtlDebugPrint("PAGE CACHE: Requesting page out: 0x%I64x\n",
                          PageOutCount);
        }

        MmRequestPagingOut(FreePageTarget);
    }

    return;
}

BOOL
IopIsPageCacheTooDirty (
    VOID
    )

/*++

Routine Description:

    This routine determines if the page cache has an uncomfortable number of
    entries in it that are dirty. Dirty entries are dangerous because they
    prevent the page cache from shrinking if memory gets tight.

Arguments:

    None.

Return Value:

    TRUE if the page cache has too many dirty entries and adding new ones
    should generally be avoided.

    FALSE if the page cache is relatively clean.

--*/

{
    UINTN DirtyPages;
    UINTN FreePages;
    UINTN IdealSize;
    UINTN MaxDirty;

    DirtyPages = IoPageCacheDirtyPageCount;
    if (DirtyPages >= IoPageCacheMaxDirtyPages) {
        return TRUE;
    }

    //
    // Determine the ideal page cache size.
    //

    FreePages = MmGetTotalFreePhysicalPages();
    if (FreePages < IoPageCacheHeadroomPagesRetreat) {
        IdealSize = IoPageCachePhysicalPageCount -
                    (IoPageCacheHeadroomPagesRetreat - FreePages);

    } else {
        IdealSize = IoPageCachePhysicalPageCount +
                    (FreePages - IoPageCacheHeadroomPagesRetreat);
    }
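
    //
    // In other words, the ideal size models where the cache would settle if
    // it grew or shrank until exactly the retreat headroom remained free.
    //
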
    //
    // Only a portion of that ideal size should be dirty.
    //

    MaxDirty = IdealSize >> PAGE_CACHE_MAX_DIRTY_SHIFT;
    if (DirtyPages >= MaxDirty) {
        return TRUE;
    }

    return FALSE;
}

COMPARISON_RESULT
IopComparePageCacheEntries (
    PRED_BLACK_TREE Tree,
    PRED_BLACK_TREE_NODE FirstNode,
    PRED_BLACK_TREE_NODE SecondNode
    )

/*++

Routine Description:

    This routine compares two Red-Black tree nodes contained inside page
    cache entries.

Arguments:

    Tree - Supplies a pointer to the Red-Black tree that owns both nodes.

    FirstNode - Supplies a pointer to the left side of the comparison.

    SecondNode - Supplies a pointer to the right side of the comparison.

Return Value:

    Same if the two nodes have the same value.

    Ascending if the first node is less than the second node.

    Descending if the second node is less than the first node.

--*/

{
    PPAGE_CACHE_ENTRY FirstEntry;
    PPAGE_CACHE_ENTRY SecondEntry;

    FirstEntry = RED_BLACK_TREE_VALUE(FirstNode, PAGE_CACHE_ENTRY, Node);
    SecondEntry = RED_BLACK_TREE_VALUE(SecondNode, PAGE_CACHE_ENTRY, Node);
    if (FirstEntry->Offset < SecondEntry->Offset) {
        return ComparisonResultAscending;

    } else if (FirstEntry->Offset > SecondEntry->Offset) {
        return ComparisonResultDescending;
    }

    return ComparisonResultSame;
}

//
// --------------------------------------------------------- Internal Functions
//

PPAGE_CACHE_ENTRY
IopCreatePageCacheEntry (
    PFILE_OBJECT FileObject,
    PVOID VirtualAddress,
    PHYSICAL_ADDRESS PhysicalAddress,
    IO_OFFSET Offset
    )

/*++

Routine Description:

    This routine creates a page cache entry.

Arguments:

    FileObject - Supplies a pointer to the file object for the device or file
        that owns the page.

    VirtualAddress - Supplies an optional virtual address for the page.

    PhysicalAddress - Supplies the physical address of the page.

    Offset - Supplies the offset into the file or device where the page is
        from.

Return Value:

    Returns a pointer to a page cache entry on success, or NULL on failure.

--*/

{
    PPAGE_CACHE_ENTRY PageCacheEntry;

    ASSERT(IS_ALIGNED(PhysicalAddress, MmPageSize()) != FALSE);
    ASSERT((FileObject->Properties.Type != IoObjectBlockDevice) ||
           (Offset < (FileObject->Properties.BlockSize *
                      FileObject->Properties.BlockCount)));

    //
    // Allocate and initialize a new page cache entry.
    //

    PageCacheEntry = MmAllocateBlock(IoPageCacheBlockAllocator, NULL);
    if (PageCacheEntry == NULL) {
        goto CreatePageCacheEntryEnd;
    }

    RtlZeroMemory(PageCacheEntry, sizeof(PAGE_CACHE_ENTRY));
    IopFileObjectAddReference(FileObject);
    PageCacheEntry->FileObject = FileObject;
    PageCacheEntry->Offset = Offset;
    PageCacheEntry->PhysicalAddress = PhysicalAddress;
    if (VirtualAddress != NULL) {
        if (IoPageCacheDisableVirtualAddresses == FALSE) {
            PageCacheEntry->VirtualAddress = VirtualAddress;
        }
    }

    PageCacheEntry->ReferenceCount = 1;

CreatePageCacheEntryEnd:
    return PageCacheEntry;
}

VOID
IopDestroyPageCacheEntries (
    PLIST_ENTRY ListHead
    )

/*++

Routine Description:

    This routine destroys (or attempts to destroy) a list of page cache
    entries. Page cache entries that are not successfully destroyed will be
    marked evicted and put back on the global removal list for destruction
    later.

Arguments:

    ListHead - Supplies a pointer to the head of the list of entries to
        destroy.

Return Value:

    None. All page cache entries on this list are removed and either destroyed
    or put back on the global removal list for destruction later.

--*/

{
    PLIST_ENTRY CurrentEntry;
    PPAGE_CACHE_ENTRY PageCacheEntry;
    UINTN RemovedCount;

    ASSERT(KeGetRunLevel() == RunLevelLow);

    RemovedCount = 0;
    while (LIST_EMPTY(ListHead) == FALSE) {
        CurrentEntry = ListHead->Next;
        PageCacheEntry = LIST_VALUE(CurrentEntry, PAGE_CACHE_ENTRY, ListEntry);
        LIST_REMOVE(CurrentEntry);
        CurrentEntry->Next = NULL;

        ASSERT(PageCacheEntry->ReferenceCount == 0);
        ASSERT(PageCacheEntry->Node.Parent == NULL);

        if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_EVICTION) != 0) {
            RtlDebugPrint("PAGE CACHE: Destroy entry 0x%08x: file object "
                          "0x%08x, offset 0x%I64x, physical address 0x%I64x, "
                          "reference count %d, flags 0x%08x.\n",
                          PageCacheEntry,
                          PageCacheEntry->FileObject,
                          PageCacheEntry->Offset,
                          PageCacheEntry->PhysicalAddress,
                          PageCacheEntry->ReferenceCount,
                          PageCacheEntry->Flags);
        }

        IopDestroyPageCacheEntry(PageCacheEntry);
        RemovedCount += 1;
    }

    //
    // Notify the debugger if any page cache entries were destroyed or failed
    // to be destroyed.
    //

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_SIZE_MANAGEMENT) != 0) {
        if (RemovedCount != 0) {
            RtlDebugPrint("PAGE CACHE: Removed %lu entries.\n", RemovedCount);
        }
    }

    return;
}

VOID
IopDestroyPageCacheEntry (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine destroys the given page cache entry. It is assumed that the
    page cache entry has already been removed from the cache and that it is
    not dirty.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry.

Return Value:

    None.

--*/

{

    PPAGE_CACHE_ENTRY BackingEntry;
    PFILE_OBJECT FileObject;
    ULONG PageSize;

    FileObject = PageCacheEntry->FileObject;

    ASSERT((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);
    ASSERT((PageCacheEntry->ReferenceCount == 0) ||
           (PageCacheEntry->Node.Parent == NULL));

    ASSERT(PageCacheEntry->ListEntry.Next == NULL);

    //
    // If this is the page owner, then free the physical page.
    //

    if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0) {
        if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {

            ASSERT(PageCacheEntry->VirtualAddress != NULL);

            PageSize = MmPageSize();
            MmUnmapAddress(PageCacheEntry->VirtualAddress, PageSize);
            RtlAtomicAdd(&IoPageCacheMappedPageCount, (UINTN)-1);
            RtlAtomicAnd32(&(PageCacheEntry->Flags),
                           ~PAGE_CACHE_ENTRY_FLAG_MAPPED);

            PageCacheEntry->VirtualAddress = NULL;
        }

        MmFreePhysicalPage(PageCacheEntry->PhysicalAddress);
        RtlAtomicAdd(&IoPageCachePhysicalPageCount, (UINTN)-1);
        PageCacheEntry->PhysicalAddress = INVALID_PHYSICAL_ADDRESS;

    //
    // Otherwise release the reference on the page cache owner if it exists.
    //

    } else if (PageCacheEntry->BackingEntry != NULL) {
        BackingEntry = PageCacheEntry->BackingEntry;

        //
        // The virtual address must either be NULL or match the backing
        // entry's virtual address. It should never be the case that the
        // backing entry is not mapped while the non-backing entry is mapped.
        //

        ASSERT((PageCacheEntry->VirtualAddress == NULL) ||
               (PageCacheEntry->VirtualAddress ==
                BackingEntry->VirtualAddress));

        ASSERT(PageCacheEntry->PhysicalAddress ==
               BackingEntry->PhysicalAddress);

        IoPageCacheEntryReleaseReference(BackingEntry);
        PageCacheEntry->BackingEntry = NULL;
    }

    //
    // Release the reference on the file object.
    //

    IopFileObjectReleaseReference(FileObject);

    ASSERT((PageCacheEntry->ReferenceCount == 0) &&
           (PageCacheEntry->Node.Parent == NULL));

    //
    // With the final reference gone, free the page cache entry.
    //

    MmFreeBlock(IoPageCacheBlockAllocator, PageCacheEntry);
    return;
}

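//
// Illustrative sketch (not part of the cache implementation): the ordering a
// hypothetical eviction path would follow before calling the destroy routine
// above. The entry must be clean, unreferenced, off all lists, and out of
// the tree. The routine names are real, but ExampleEvictEntry is invented.
//
//     VOID
//     ExampleEvictEntry (
//         PPAGE_CACHE_ENTRY Entry
//         )
//
//     {
//
//         KeAcquireSharedExclusiveLockExclusive(Entry->FileObject->Lock);
//         if (Entry->Node.Parent != NULL) {
//             IopRemovePageCacheEntryFromTree(Entry);
//         }
//
//         KeReleaseSharedExclusiveLockExclusive(Entry->FileObject->Lock);
//
//         ASSERT((Entry->ReferenceCount == 0) &&
//                (Entry->ListEntry.Next == NULL));
//
//         IopDestroyPageCacheEntry(Entry);
//         return;
//     }
//
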
VOID
IopInsertPageCacheEntry (
    PPAGE_CACHE_ENTRY NewEntry,
    PPAGE_CACHE_ENTRY LinkEntry
    )

/*++

Routine Description:

    This routine inserts the new page cache entry into the page cache and
    links it to the link entry once it is inserted. This routine assumes that
    the page cache tree lock is held exclusively and that there is not already
    an entry for the same file and offset in the tree.

Arguments:

    NewEntry - Supplies a pointer to the new entry to insert into the page
        cache.

    LinkEntry - Supplies an optional pointer to an existing page cache entry
        to link to the new page cache entry.

Return Value:

    None.

--*/

{

    ULONG ClearFlags;
    IO_OBJECT_TYPE LinkType;
    IO_OBJECT_TYPE NewType;
    ULONG OldFlags;
    PVOID VirtualAddress;

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(NewEntry->FileObject->Lock));
    ASSERT(NewEntry->Flags == 0);

    //
    // Insert the new entry into its file object's tree.
    //

    RtlRedBlackTreeInsert(&(NewEntry->FileObject->PageCacheTree),
                          &(NewEntry->Node));

    IoPageCacheEntryCount += 1;

    //
    // Now link the new entry to the supplied link entry based on their I/O
    // types.
    //

    if (LinkEntry != NULL) {
        LinkType = LinkEntry->FileObject->Properties.Type;
        NewType = NewEntry->FileObject->Properties.Type;

        ASSERT(LinkType != NewType);
        ASSERT(IS_IO_OBJECT_TYPE_LINKABLE(LinkType) != FALSE);
        ASSERT(IS_IO_OBJECT_TYPE_LINKABLE(NewType) != FALSE);
        ASSERT((LinkEntry->Flags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0);
        ASSERT(LinkEntry->PhysicalAddress == NewEntry->PhysicalAddress);
        ASSERT((LinkEntry->VirtualAddress == NewEntry->VirtualAddress) ||
               (NewEntry->VirtualAddress == NULL));

        if ((LinkType == IoObjectBlockDevice) &&
            (IO_IS_CACHEABLE_FILE(NewType))) {

            IoPageCacheEntryAddReference(LinkEntry);
            NewEntry->BackingEntry = LinkEntry;

        } else {

            ASSERT((IO_IS_CACHEABLE_FILE(LinkType)) &&
                   (NewType == IoObjectBlockDevice));

            IoPageCacheEntryAddReference(NewEntry);
            LinkEntry->BackingEntry = NewEntry;
            ClearFlags = PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER |
                         PAGE_CACHE_ENTRY_FLAG_MAPPED;

            OldFlags = RtlAtomicAnd32(&(LinkEntry->Flags), ~ClearFlags);

            //
            // The link entry had better not be dirty, because then it would
            // be a dirty non-page-owner entry, which messes up the
            // accounting.
            //

            ASSERT((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);

            NewEntry->Flags = PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER;

            //
            // If the old entry was mapped, it had better be the same mapping
            // as the new entry (if any), since otherwise the new entry's VA
            // would be leaked.
            //

            if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
                VirtualAddress = LinkEntry->VirtualAddress;

                ASSERT((VirtualAddress != NULL) &&
                       ((NewEntry->VirtualAddress == NULL) ||
                        (NewEntry->VirtualAddress == VirtualAddress)));

                NewEntry->VirtualAddress = VirtualAddress;
                NewEntry->Flags |= PAGE_CACHE_ENTRY_FLAG_MAPPED;
            }
        }

    } else {
        if (NewEntry->VirtualAddress != NULL) {
            NewEntry->Flags |= PAGE_CACHE_ENTRY_FLAG_MAPPED;
            RtlAtomicAdd(&IoPageCacheMappedPageCount, 1);
        }

        RtlAtomicAdd(&IoPageCachePhysicalPageCount, 1);
        NewEntry->Flags |= PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER;
        MmSetPageCacheEntryForPhysicalAddress(NewEntry->PhysicalAddress,
                                              NewEntry);
    }

    return;
}

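//
// Illustrative sketch (assumptions flagged): how a hypothetical read path
// might insert a file-level entry linked to an existing block device entry.
// IopCreatePageCacheEntry and its exact parameters are assumed here; the
// lock, lookup, and insert calls are the real ones used in this file.
//
//     KeAcquireSharedExclusiveLockExclusive(FileObject->Lock);
//     Entry = IopLookupPageCacheEntryHelper(FileObject, Offset);
//     if (Entry == NULL) {
//         NewEntry = IopCreatePageCacheEntry(FileObject,
//                                            VirtualAddress,
//                                            PhysicalAddress,
//                                            Offset);
//
//         if (NewEntry != NULL) {
//
//             //
//             // BlockDeviceEntry owns the physical page; the new file-level
//             // entry takes a reference on it as its backing entry.
//             //
//
//             IopInsertPageCacheEntry(NewEntry, BlockDeviceEntry);
//         }
//     }
//
//     KeReleaseSharedExclusiveLockExclusive(FileObject->Lock);
//
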
PPAGE_CACHE_ENTRY
IopLookupPageCacheEntryHelper (
    PFILE_OBJECT FileObject,
    IO_OFFSET Offset
    )

/*++

Routine Description:

    This routine searches for a page cache entry based on the file object and
    offset. This routine assumes the page cache lock is held. If found, this
    routine takes a reference on the page cache entry.

Arguments:

    FileObject - Supplies a pointer to the file object for the file or device.

    Offset - Supplies an offset into the file or device.

Return Value:

    Returns a pointer to the found page cache entry on success, or NULL on
    failure.

--*/

{

    PPAGE_CACHE_ENTRY FoundEntry;
    PRED_BLACK_TREE_NODE FoundNode;
    PAGE_CACHE_ENTRY SearchEntry;

    SearchEntry.FileObject = FileObject;
    SearchEntry.Offset = Offset;
    SearchEntry.Flags = 0;
    FoundNode = RtlRedBlackTreeSearch(&(FileObject->PageCacheTree),
                                      &(SearchEntry.Node));

    if (FoundNode == NULL) {
        return NULL;
    }

    FoundEntry = RED_BLACK_TREE_VALUE(FoundNode, PAGE_CACHE_ENTRY, Node);
    IoPageCacheEntryAddReference(FoundEntry);
    return FoundEntry;
}

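//
// Illustrative sketch: the lookup-and-pin pattern a hypothetical caller
// would use with the helper above. The reference taken by the helper keeps
// the entry alive after the file object lock is dropped.
//
//     KeAcquireSharedExclusiveLockShared(FileObject->Lock);
//     Entry = IopLookupPageCacheEntryHelper(FileObject, Offset);
//     KeReleaseSharedExclusiveLockShared(FileObject->Lock);
//     if (Entry != NULL) {
//
//         //
//         // Use the pinned entry (for example, copy from its mapping), then
//         // drop the reference so the entry can return to the LRU lists.
//         //
//
//         IoPageCacheEntryReleaseReference(Entry);
//     }
//
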
VOID
IopPageCacheThread (
    PVOID Parameter
    )

/*++

Routine Description:

    This routine cleans cached pages and removes clean pages if the cache is
    consuming too much memory.

Arguments:

    Parameter - Supplies a pointer supplied by the creator of the thread. This
        parameter is not used.

Return Value:

    None.

--*/

{

    ULONGLONG CurrentTime;
    PKEVENT PhysicalMemoryWarningEvent;
    PVOID SignalingObject;
    KSTATUS Status;
    PKEVENT VirtualMemoryWarningEvent;
    PVOID WaitObjectArray[3];

    Status = STATUS_SUCCESS;
    IoPageCacheThread = KeGetCurrentThread();

    //
    // Get the memory warning events from the memory manager.
    //

    PhysicalMemoryWarningEvent = MmGetPhysicalMemoryWarningEvent();
    VirtualMemoryWarningEvent = MmGetVirtualMemoryWarningEvent();

    ASSERT(PhysicalMemoryWarningEvent != NULL);
    ASSERT(VirtualMemoryWarningEvent != NULL);

    //
    // There are only three objects to wait for and as this is less than the
    // thread's built-in wait blocks, do not pre-allocate a wait block.
    //

    ASSERT(3 < BUILTIN_WAIT_BLOCK_ENTRY_COUNT);

    WaitObjectArray[0] = IoPageCacheWorkTimer;
    WaitObjectArray[1] = PhysicalMemoryWarningEvent;
    WaitObjectArray[2] = VirtualMemoryWarningEvent;

    //
    // Loop forever waiting for either the page cache timer or the memory
    // manager's warning events.
    //

    while (TRUE) {
        Status = ObWaitOnObjects(WaitObjectArray,
                                 3,
                                 0,
                                 WAIT_TIME_INDEFINITE,
                                 NULL,
                                 &SignalingObject);

        ASSERT(KSUCCESS(Status));

        //
        // The page cache cleaning is about to start. Mark down the current
        // time as the last time the cleaning ran. This leaves a record that
        // an attempt was made to flush any writes that occurred before this
        // time.
        //

        CurrentTime = KeGetRecentTimeCounter();
        WRITE_INT64_SYNC(&IoPageCacheLastCleanTime, CurrentTime);

        //
        // Loop over the process of removing excess entries and flushing
        // dirty entries. The flush code may decide to loop back and remove
        // more excess entries.
        //

        while (TRUE) {
            Status = STATUS_SUCCESS;

            //
            // Blast away the list of page cache entries that are ready for
            // removal.
            //

            IopTrimRemovalPageCacheList();

            //
            // Attempt to trim out some clean page cache entries from the LRU
            // list. This routine should only do any work if memory is tight.
            // This is the root of the page cache thread, so there's never
            // recursive I/O to worry about (so go ahead and destroy file
            // objects).
            //

            IopTrimPageCache(FALSE);

            //
            // Flush some dirty file objects.
            //

            Status = IopFlushFileObjects(0, 0, NULL);
            if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_DIRTY_LISTS) != 0) {
                IopCheckDirtyFileObjectsList();
            }

            if (Status == STATUS_TRY_AGAIN) {
                continue;
            }

            //
            // If the page cache appears to be completely clean, try to kill
            // the timer and go dormant. Kill the timer, change the state to
            // clean, and then see if any dirtiness snuck in while that was
            // happening. If so, set it back to dirty (racing with everyone
            // else that may have already done that).
            //

            KeCancelTimer(IoPageCacheWorkTimer);
            RtlAtomicExchange32(&IoPageCacheState, PageCacheStateClean);
            if ((!LIST_EMPTY(&IoFileObjectsDirtyList)) ||
                (IoPageCacheDirtyPageCount != 0)) {

                IopSchedulePageCacheThread();
            }

            break;
        }
    }

    return;
}

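//
// Illustrative sketch: how a hypothetical writer hands work to the thread
// above. Marking an entry dirty and then scheduling the thread (which arms
// the work timer it waits on) is the usual hand-off; the real dirtying paths
// live elsewhere in this file.
//
//     IopMarkPageCacheEntryDirty(Entry);
//     IopSchedulePageCacheThread();
//
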
KSTATUS
IopFlushPageCacheBuffer (
    PIO_BUFFER FlushBuffer,
    UINTN FlushSize,
    ULONG Flags
    )

/*++

Routine Description:

    This routine flushes the given buffer to the owning file or device. This
    routine assumes that the lock of the file object that owns the page cache
    entries is held in the appropriate mode.

Arguments:

    FlushBuffer - Supplies a pointer to a cache-backed I/O buffer to flush.

    FlushSize - Supplies the number of bytes to flush.

    Flags - Supplies a bitmask of I/O flags for the flush. See IO_FLAG_* for
        definitions.

Return Value:

    Status code.

--*/

{

    UINTN BufferOffset;
    UINTN BytesToWrite;
    PPAGE_CACHE_ENTRY CacheEntry;
    BOOL Clean;
    PFILE_OBJECT FileObject;
    IO_OFFSET FileOffset;
    ULONGLONG FileSize;
    IO_CONTEXT IoContext;
    BOOL MarkedClean;
    ULONG PageSize;
    KSTATUS Status;

    CacheEntry = MmGetIoBufferPageCacheEntry(FlushBuffer, 0);
    FileObject = CacheEntry->FileObject;
    FileOffset = CacheEntry->Offset;
    PageSize = MmPageSize();
    READ_INT64_SYNC(&(FileObject->Properties.FileSize), &FileSize);

    ASSERT(FlushSize <= PAGE_CACHE_FLUSH_MAX);
    ASSERT(KeIsSharedExclusiveLockHeld(FileObject->Lock) != FALSE);

    //
    // Try to mark all the pages clean. If they are all already clean, then
    // just exit. Something is already performing the I/O. Also make sure that
    // all the supplied page cache entries are still in the cache. If an
    // evicted entry is found, do not write any data from that page or
    // further; the file was truncated.
    //

    BufferOffset = 0;
    BytesToWrite = 0;
    Clean = TRUE;
    while (BufferOffset < FlushSize) {
        CacheEntry = MmGetIoBufferPageCacheEntry(FlushBuffer, BufferOffset);

        //
        // Stop at evicted entries; they should never be flushed.
        //

        if (CacheEntry->Node.Parent == NULL) {
            break;
        }

        MarkedClean = IopMarkPageCacheEntryClean(CacheEntry, TRUE);
        if (MarkedClean != FALSE) {
            Clean = FALSE;
        }

        BytesToWrite += PageSize;
        BufferOffset += PageSize;
    }

    //
    // Avoid writing beyond the end of the file.
    //

    if (FileOffset + BytesToWrite > FileSize) {

        ASSERT(FileOffset <= FileSize);

        BytesToWrite = FileSize - FileOffset;
    }

    //
    // If there are no bytes to write, because all the pages got evicted, then
    // exit now.
    //

    if (BytesToWrite == 0) {
        Status = STATUS_SUCCESS;
        goto FlushPageCacheBufferEnd;
    }

    //
    // Exit now if it was already clean, unless this is synchronized I/O. It
    // could be that the backing entries are what require flushing and this
    // layer does not have jurisdiction to mark them clean.
    //

    if ((Clean != FALSE) && ((Flags & IO_FLAG_DATA_SYNCHRONIZED) == 0)) {
        Status = STATUS_SUCCESS;
        goto FlushPageCacheBufferEnd;
    }

    //
    // For block devices, drop the lock. They're responsible for their own
    // synchronization.
    //

    if (FileObject->Properties.Type == IoObjectBlockDevice) {
        KeReleaseSharedExclusiveLockShared(FileObject->Lock);
    }

    IoContext.IoBuffer = FlushBuffer;
    IoContext.Offset = FileOffset;
    IoContext.SizeInBytes = BytesToWrite;
    IoContext.Flags = Flags;
    IoContext.TimeoutInMilliseconds = WAIT_TIME_INDEFINITE;
    IoContext.Write = TRUE;
    Status = IopPerformNonCachedWrite(FileObject, &IoContext, NULL);
    if (FileObject->Properties.Type == IoObjectBlockDevice) {
        KeAcquireSharedExclusiveLockShared(FileObject->Lock);
    }

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_FLUSH) != 0) {
        if ((!KSUCCESS(Status)) || (Flags != 0) ||
            (IoContext.BytesCompleted != BytesToWrite)) {

            RtlDebugPrint("PAGE CACHE: Flushed FILE_OBJECT 0x%08x "
                          "with status 0x%08x: flags 0x%x, file offset "
                          "0x%I64x, bytes attempted 0x%x, bytes completed "
                          "0x%x.\n",
                          FileObject,
                          Status,
                          Flags,
                          FileOffset,
                          BytesToWrite,
                          IoContext.BytesCompleted);

        } else {
            RtlDebugPrint("PAGE CACHE: Flushed FILE_OBJECT %x "
                          "Offset 0x%I64x Size 0x%x\n",
                          FileObject,
                          FileOffset,
                          BytesToWrite);
        }
    }

    if (!KSUCCESS(Status)) {
        goto FlushPageCacheBufferEnd;
    }

    if (IoContext.BytesCompleted != BytesToWrite) {

        ASSERT(FALSE);

        Status = STATUS_DATA_LENGTH_MISMATCH;
        goto FlushPageCacheBufferEnd;
    }

    Status = STATUS_SUCCESS;

FlushPageCacheBufferEnd:
    if (!KSUCCESS(Status)) {

        //
        // Mark the non-written pages as dirty again.
        //

        BufferOffset = ALIGN_RANGE_DOWN(IoContext.BytesCompleted, PageSize);
        while (BufferOffset < BytesToWrite) {
            CacheEntry = MmGetIoBufferPageCacheEntry(FlushBuffer,
                                                     BufferOffset);

            IopMarkPageCacheEntryDirty(CacheEntry);
            BufferOffset += PageSize;
        }

        if (IoContext.BytesCompleted != BytesToWrite) {
            IopMarkFileObjectDirty(CacheEntry->FileObject);
        }
    }

    return Status;
}

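//
// Illustrative sketch: a hypothetical flush call built on the routine above.
// The buffer is assumed to have been filled by the caller with runs of
// dirty, cache-backed pages, at most PAGE_CACHE_FLUSH_MAX bytes long.
//
//     KeAcquireSharedExclusiveLockShared(FileObject->Lock);
//     Status = IopFlushPageCacheBuffer(FlushBuffer,
//                                      FlushSize,
//                                      IO_FLAG_DATA_SYNCHRONIZED);
//
//     KeReleaseSharedExclusiveLockShared(FileObject->Lock);
//     if (!KSUCCESS(Status)) {
//
//         //
//         // The routine already re-marked unwritten pages dirty; the caller
//         // only needs to decide whether to retry or propagate the failure.
//         //
//     }
//
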
VOID
IopTrimRemovalPageCacheList (
    VOID
    )

/*++

Routine Description:

    This routine removes the page cache entries from the list of page cache
    entries that are ready for removal.

Arguments:

    None.

Return Value:

    None.

--*/

{

    LIST_ENTRY DestroyListHead;

    if (LIST_EMPTY(&(IoPageCacheRemovalList)) != FALSE) {
        return;
    }

    INITIALIZE_LIST_HEAD(&DestroyListHead);
    IopRemovePageCacheEntriesFromList(&IoPageCacheRemovalList,
                                      &DestroyListHead,
                                      FALSE,
                                      NULL);

    //
    // Destroy the evicted page cache entries. This will reduce the page
    // cache's physical page count for any page that it ends up releasing.
    //

    IopDestroyPageCacheEntries(&DestroyListHead);
    return;
}

VOID
IopRemovePageCacheEntriesFromList (
    PLIST_ENTRY PageCacheListHead,
    PLIST_ENTRY DestroyListHead,
    BOOL TimidEffort,
    PUINTN TargetRemoveCount
    )

/*++

Routine Description:

    This routine processes page cache entries in the given list, removing them
    from the tree and the list, if possible. If a target remove count is
    supplied, then the removal process will stop as soon as the removal count
    reaches 0 or the end of the list is reached. This routine assumes that the
    page cache tree lock is held exclusively.

Arguments:

    PageCacheListHead - Supplies a pointer to the head of the page cache list.

    DestroyListHead - Supplies a pointer to the head of the list of page cache
        entries that can be destroyed as a result of the removal process.

    TimidEffort - Supplies a boolean indicating whether or not this function
        should only try once to acquire a file object lock before moving on.
        Set this to TRUE if this thread might already be holding file object
        locks.

    TargetRemoveCount - Supplies an optional pointer to the number of page
        cache entries the caller wishes to remove from the list. On return, it
        will store the difference between the target and the actual number of
        page cache entries removed. If not supplied, then the routine will
        process the entire list looking for page cache entries to remove.

Return Value:

    None.

--*/

{

    PFILE_OBJECT FileObject;
    ULONG Flags;
    LIST_ENTRY LocalList;
    PSHARED_EXCLUSIVE_LOCK Lock;
    PPAGE_CACHE_ENTRY PageCacheEntry;
    BOOL PageTakenDown;
    BOOL PageWasDirty;
    KSTATUS Status;

    KeAcquireQueuedLock(IoPageCacheListLock);
    if (LIST_EMPTY(PageCacheListHead)) {
        KeReleaseQueuedLock(IoPageCacheListLock);
        return;
    }

    //
    // Move the contents of the list over to a local list to avoid infinitely
    // working on the same entries. The local list is also protected by the
    // list lock, and cannot be manipulated without it.
    //

    MOVE_LIST(PageCacheListHead, &LocalList);
    INITIALIZE_LIST_HEAD(PageCacheListHead);
    while ((!LIST_EMPTY(&LocalList)) &&
           ((TargetRemoveCount == NULL) || (*TargetRemoveCount != 0))) {

        PageCacheEntry = LIST_VALUE(LocalList.Next,
                                    PAGE_CACHE_ENTRY,
                                    ListEntry);

        FileObject = PageCacheEntry->FileObject;
        Flags = PageCacheEntry->Flags;

        //
        // Remove anything with a reference to avoid iterating through it
        // over and over. When that last reference is dropped, it will be put
        // back on a list.
        //

        if (PageCacheEntry->ReferenceCount != 0) {
            LIST_REMOVE(&(PageCacheEntry->ListEntry));
            PageCacheEntry->ListEntry.Next = NULL;

            //
            // Double check the reference count. If it dropped to zero while
            // the entry was being removed, it may not have observed the list
            // entry being nulled out, and may not be waiting to put the entry
            // back.
            //

            RtlMemoryBarrier();
            if (PageCacheEntry->ReferenceCount == 0) {
                INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                              &IoPageCacheCleanList);
            }

            continue;
        }

        //
        // If it's dirty, then there must be another thread that just marked
        // it dirty but has yet to remove it from the list. Remove it and move
        // on.
        //

        if ((Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
            LIST_REMOVE(&(PageCacheEntry->ListEntry));
            PageCacheEntry->ListEntry.Next = NULL;
            continue;
        }

        //
        // For timid attempts, try to get the lock without dropping the list
        // lock (since for a single attempt lock inversions are not an issue).
        // If it fails, just move on in case this thread already owns the lock
        // in question further up the stack.
        //

        Lock = FileObject->Lock;
        if (TimidEffort != FALSE) {
            if (KeTryToAcquireSharedExclusiveLockExclusive(Lock) == FALSE) {
                LIST_REMOVE(&(PageCacheEntry->ListEntry));
                INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                              &IoPageCacheCleanList);

                continue;
            }
        }

        //
        // Add a reference to the entry, drop the list lock, and acquire the
        // file object lock to prevent lock ordering trouble.
        //

        IoPageCacheEntryAddReference(PageCacheEntry);
        KeReleaseQueuedLock(IoPageCacheListLock);

        //
        // Acquire the lock if not already acquired.
        //

        if (TimidEffort == FALSE) {
            KeAcquireSharedExclusiveLockExclusive(Lock);
        }

        PageTakenDown = FALSE;
        if (PageCacheEntry->ReferenceCount == 1) {

            //
            // Unmap this page cache entry from any image sections that may be
            // mapping it. If the mappings note that the page is dirty, then
            // mark it dirty and skip removing the page if it became dirty.
            // The file object lock holds off any new mappings from getting at
            // this entry. Unmapping a page cache entry can fail if a
            // non-paged image section maps it.
            //

            Flags = PageCacheEntry->Flags;
            Status = IopUnmapPageCacheEntrySections(PageCacheEntry,
                                                    &PageWasDirty);

            if (KSUCCESS(Status)) {
                if (PageWasDirty != FALSE) {
                    IopMarkPageCacheEntryDirty(PageCacheEntry);
                    Flags = PageCacheEntry->Flags;
                }

                if ((Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) {

                    //
                    // Make sure the cache entry is clean to keep the metrics
                    // correct.
                    //

                    IopMarkPageCacheEntryClean(PageCacheEntry, FALSE);

                    //
                    // Remove the node from the page cache entry tree if
                    // necessary.
                    //

                    if (PageCacheEntry->Node.Parent != NULL) {
                        IopRemovePageCacheEntryFromTree(PageCacheEntry);
                    }

                    PageTakenDown = TRUE;

                    //
                    // If this page cache entry owns its physical page, then
                    // it counts towards the removal count.
                    //

                    if ((Flags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0) {
                        if (TargetRemoveCount != NULL) {
                            *TargetRemoveCount -= 1;
                        }
                    }
                }
            }
        }

        //
        // Drop the file object lock and reacquire the list lock.
        //

        KeReleaseSharedExclusiveLockExclusive(Lock);
        KeAcquireQueuedLock(IoPageCacheListLock);

        //
        // If the page was successfully taken down, move it over to the
        // destroy list.
        //

        if (PageTakenDown != FALSE) {

            ASSERT((PageCacheEntry->Flags &
                    PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);

            if (PageCacheEntry->ListEntry.Next != NULL) {
                LIST_REMOVE(&(PageCacheEntry->ListEntry));
            }

            INSERT_BEFORE(&(PageCacheEntry->ListEntry), DestroyListHead);

        //
        // Otherwise, either remove it from this list, or stick it on the
        // end. The list assignment has to be correct because releasing
        // the reference might try to stick it on a list if it sees it's clean
        // and not on one, but the list lock is already held here.
        //

        } else {
            if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) {
                if (PageCacheEntry->ListEntry.Next != NULL) {
                    LIST_REMOVE(&(PageCacheEntry->ListEntry));
                }

                INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                              &IoPageCacheCleanList);
            }
        }

        IoPageCacheEntryReleaseReference(PageCacheEntry);
    }

    //
    // Stick any remainder back on the list.
    //

    if (!LIST_EMPTY(&LocalList)) {
        APPEND_LIST(&LocalList, PageCacheListHead);
    }

    KeReleaseQueuedLock(IoPageCacheListLock);
    return;
}

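//
// Illustrative sketch: how a hypothetical trimming pass would use the target
// count contract of the routine above. On return, TargetRemoveCount holds
// the difference between the target and the number of page-owning entries
// actually removed; the target value of 64 is arbitrary.
//
//     TargetRemoveCount = 64;
//     INITIALIZE_LIST_HEAD(&DestroyListHead);
//     IopRemovePageCacheEntriesFromList(&IoPageCacheCleanUnmappedList,
//                                       &DestroyListHead,
//                                       FALSE,
//                                       &TargetRemoveCount);
//
//     //
//     // Anything that made it to the destroy list is clean and out of the
//     // tree, and can be freed in bulk.
//     //
//
//     IopDestroyPageCacheEntries(&DestroyListHead);
//
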
VOID
IopTrimPageCacheVirtual (
    BOOL TimidEffort
    )

/*++

Routine Description:

    This routine unmaps as many clean page cache entries as is necessary to
    bring the number of mapped page cache entries back down to a reasonable
    level. It unmaps page cache entries in LRU order.

Arguments:

    TimidEffort - Supplies a boolean indicating whether or not this function
        should only try once to acquire a file object lock before moving on.
        Set this to TRUE if this thread might already be holding file object
        locks.

Return Value:

    None.

--*/

{

    PLIST_ENTRY CurrentEntry;
    PFILE_OBJECT FileObject;
    UINTN FreeVirtualPages;
    PSHARED_EXCLUSIVE_LOCK Lock;
    UINTN MappedCleanPageCount;
    PPAGE_CACHE_ENTRY PageCacheEntry;
    ULONG PageSize;
    LIST_ENTRY ReturnList;
    UINTN TargetUnmapCount;
    UINTN UnmapCount;
    UINTN UnmapSize;
    PVOID UnmapStart;
    PVOID VirtualAddress;

    TargetUnmapCount = 0;
    FreeVirtualPages = -1;
    if ((LIST_EMPTY(&IoPageCacheCleanList)) ||
        (IopIsPageCacheTooMapped(&FreeVirtualPages) == FALSE)) {

        return;
    }

    ASSERT(FreeVirtualPages != -1);

    INITIALIZE_LIST_HEAD(&ReturnList);

    //
    // The page cache is not leaving enough free virtual memory; determine how
    // many entries must be unmapped.
    //

    TargetUnmapCount = 0;
    if (FreeVirtualPages < IoPageCacheHeadroomVirtualPagesRetreat) {
        TargetUnmapCount = IoPageCacheHeadroomVirtualPagesRetreat -
                           FreeVirtualPages;
    }

    //
    // Assert on the accounting numbers, but allow for a bit of transience.
    //

    ASSERT(IoPageCacheMappedDirtyPageCount <=
           IoPageCacheMappedPageCount + 0x10);

    ASSERT(IoPageCacheMappedDirtyPageCount <=
           IoPageCacheDirtyPageCount + 0x10);

    MappedCleanPageCount = IoPageCacheMappedPageCount -
                           IoPageCacheMappedDirtyPageCount;

    if (TargetUnmapCount > MappedCleanPageCount) {
        TargetUnmapCount = MappedCleanPageCount;
    }

    if (TargetUnmapCount == 0) {
        if (MmGetVirtualMemoryWarningLevel() == MemoryWarningLevelNone) {
            return;
        }

        //
        // Unmap some minimum number of pages before relying on the virtual
        // warning to indicate when the coast is clear. This should hopefully
        // build some headroom in fragmented cases.
        //

        TargetUnmapCount = IoPageCacheHeadroomVirtualPagesRetreat -
                           IoPageCacheHeadroomVirtualPagesTrigger;
    }

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_MAPPED_MANAGEMENT) != 0) {
        RtlDebugPrint("PAGE CACHE: Attempt to unmap at least %lu entries.\n",
                      TargetUnmapCount);
    }

    //
    // Iterate over the clean LRU page cache list trying to unmap page cache
    // entries. Stop as soon as the target count has been reached.
    //

    UnmapStart = NULL;
    UnmapSize = 0;
    UnmapCount = 0;
    PageSize = MmPageSize();
    KeAcquireQueuedLock(IoPageCacheListLock);
    while ((!LIST_EMPTY(&IoPageCacheCleanList)) &&
           ((TargetUnmapCount != UnmapCount) ||
            (MmGetVirtualMemoryWarningLevel() != MemoryWarningLevelNone))) {

        CurrentEntry = IoPageCacheCleanList.Next;
        PageCacheEntry = LIST_VALUE(CurrentEntry, PAGE_CACHE_ENTRY, ListEntry);

        //
        // Skip over all page cache entries with references, removing them
        // from this list. They cannot be unmapped at the moment.
        //

        if (PageCacheEntry->ReferenceCount != 0) {
            LIST_REMOVE(&(PageCacheEntry->ListEntry));
            PageCacheEntry->ListEntry.Next = NULL;

            //
            // Double check the reference count. If it dropped to zero while
            // the entry was being removed, it may not have observed the list
            // entry being nulled out, and may not be waiting to put the entry
            // back.
            //

            RtlMemoryBarrier();
            if (PageCacheEntry->ReferenceCount == 0) {
                INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                              &IoPageCacheCleanList);
            }

            continue;
        }

        //
        // If it's dirty, then there must be another thread that just marked
        // it dirty but has yet to remove it from the list. Remove it and move
        // on.
        //

        if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
            LIST_REMOVE(&(PageCacheEntry->ListEntry));
            PageCacheEntry->ListEntry.Next = NULL;
            continue;
        }

        //
        // If the page was not mapped, and is the page owner, move it over to
        // the clean unmapped list to prevent iterating over it again during
        // subsequent invocations of this function.
        //

        if ((PageCacheEntry->Flags &
             (PAGE_CACHE_ENTRY_FLAG_MAPPED |
              PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER)) ==
            PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) {

            LIST_REMOVE(&(PageCacheEntry->ListEntry));
            INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                          &IoPageCacheCleanUnmappedList);

            continue;
        }

        FileObject = PageCacheEntry->FileObject;
        Lock = FileObject->Lock;

        //
        // For timid attempts, try to get the lock without dropping the list
        // lock (since for a single attempt lock inversions are not an issue).
        // If it fails, just move on in case this thread already owns the lock
        // in question further up the stack.
        //

        if (TimidEffort != FALSE) {
            if (KeTryToAcquireSharedExclusiveLockExclusive(Lock) == FALSE) {
                LIST_REMOVE(&(PageCacheEntry->ListEntry));
                INSERT_BEFORE(&(PageCacheEntry->ListEntry), &ReturnList);
                continue;
            }
        }

        //
        // Add a reference to the page cache entry, drop the list lock, and
        // acquire the file object lock to ensure no new references come in
        // while the VA is being torn down.
        //

        IoPageCacheEntryAddReference(PageCacheEntry);
        KeReleaseQueuedLock(IoPageCacheListLock);
        if (TimidEffort == FALSE) {
            KeAcquireSharedExclusiveLockExclusive(Lock);
        }

        IopRemovePageCacheEntryVirtualAddress(PageCacheEntry, &VirtualAddress);
        if (VirtualAddress != NULL) {
            UnmapCount += 1;

            //
            // If this page is not contiguous with the previous run, unmap the
            // previous run.
            //

            if ((UnmapStart != NULL) &&
                (VirtualAddress != (UnmapStart + UnmapSize))) {

                MmUnmapAddress(UnmapStart, UnmapSize);
                UnmapStart = NULL;
                UnmapSize = 0;
            }

            //
            // Either start a new run or append it to the previous run.
            //

            if (UnmapStart == NULL) {
                UnmapStart = VirtualAddress;
            }

            UnmapSize += PageSize;
        }

        //
        // Drop the file object lock and reacquire the list lock.
        //

        KeReleaseSharedExclusiveLockExclusive(Lock);
        KeAcquireQueuedLock(IoPageCacheListLock);
        if ((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) {
            if (PageCacheEntry->ListEntry.Next != NULL) {
                LIST_REMOVE(&(PageCacheEntry->ListEntry));
            }

            if (((PageCacheEntry->Flags &
                  PAGE_CACHE_ENTRY_FLAG_MAPPED) == 0) &&
                (PageCacheEntry->BackingEntry == NULL)) {

                INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                              &IoPageCacheCleanUnmappedList);

            } else {
                INSERT_BEFORE(&(PageCacheEntry->ListEntry), &ReturnList);
            }
        }

        IoPageCacheEntryReleaseReference(PageCacheEntry);
    }

    //
    // Stick any entries whose locks couldn't be acquired at the time back
    // on the list.
    //

    if (!LIST_EMPTY(&ReturnList)) {
        APPEND_LIST(&ReturnList, &IoPageCacheCleanList);
    }

    KeReleaseQueuedLock(IoPageCacheListLock);

    //
    // If there is a remaining region of contiguous virtual memory that needs
    // to be unmapped, it can be done after releasing the lock as all of the
    // page cache entries have already been updated to reflect being unmapped.
    //

    if (UnmapStart != NULL) {
        MmUnmapAddress(UnmapStart, UnmapSize);
    }

    if (UnmapCount != 0) {
        RtlAtomicAdd(&IoPageCacheMappedPageCount, -UnmapCount);
    }

    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_MAPPED_MANAGEMENT) != 0) {
        RtlDebugPrint("PAGE CACHE: Unmapped %lu entries.\n", UnmapCount);
    }

    return;
}

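//
// Illustrative sketch: the two kinds of call sites the routine above is
// designed for. The page cache thread can afford to wait on file object
// locks, while a hypothetical caller inside an I/O path that may already
// hold such locks must stay timid.
//
//     IopTrimPageCacheVirtual(FALSE);    // From the page cache thread.
//     IopTrimPageCacheVirtual(TRUE);     // From within an I/O path.
//
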
BOOL
IopIsIoBufferPageCacheBackedHelper (
    PFILE_OBJECT FileObject,
    PIO_BUFFER IoBuffer,
    IO_OFFSET Offset,
    UINTN SizeInBytes
    )

/*++

Routine Description:

    This routine determines whether or not the given I/O buffer with data
    targeting the given file object at the given offset is currently backed by
    the page cache, up to the given size. The caller is expected to
    synchronize with eviction via truncate.

Arguments:

    FileObject - Supplies a pointer to a file object.

    IoBuffer - Supplies a pointer to an I/O buffer.

    Offset - Supplies an offset into the file or device object.

    SizeInBytes - Supplies the number of bytes in the I/O buffer that should
        be cache backed.

Return Value:

    Returns TRUE if the I/O buffer is backed by valid page cache entries, or
    FALSE otherwise.

--*/

{

    UINTN BufferOffset;
    PPAGE_CACHE_ENTRY PageCacheEntry;
    ULONG PageSize;

    PageSize = MmPageSize();

    ASSERT(IS_ALIGNED(SizeInBytes, PageSize) != FALSE);

    BufferOffset = 0;
    while (SizeInBytes != 0) {

        //
        // If this page in the buffer is not backed by a page cache entry or
        // not backed by the correct page cache entry, then return FALSE. Also
        // return FALSE if the offsets do not agree.
        //

        PageCacheEntry = MmGetIoBufferPageCacheEntry(IoBuffer, BufferOffset);
        if ((PageCacheEntry == NULL) ||
            (PageCacheEntry->FileObject != FileObject) ||
            (PageCacheEntry->Node.Parent == NULL) ||
            (PageCacheEntry->Offset != Offset)) {

            return FALSE;
        }

        SizeInBytes -= PageSize;
        BufferOffset += PageSize;
        Offset += PageSize;
    }

    return TRUE;
}

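//
// Illustrative sketch: a hypothetical write path short-circuiting when the
// caller's buffer is already composed of this file's own page cache entries,
// in which case the data is by definition already in the cache. AlignedSize
// is an assumed page-aligned byte count.
//
//     if (IopIsIoBufferPageCacheBackedHelper(FileObject,
//                                            IoBuffer,
//                                            Offset,
//                                            AlignedSize) != FALSE) {
//
//         //
//         // The pages only need to be marked dirty; no copy is required.
//         //
//     }
//
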
KSTATUS
IopUnmapPageCacheEntrySections (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    PBOOL PageWasDirty
    )

/*++

Routine Description:

    This routine unmaps the physical page owned by the given page cache entry
    from all the image sections that may have it mapped.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry to be
        unmapped.

    PageWasDirty - Supplies a pointer where a boolean will be returned
        indicating if the page that was unmapped was dirty. This parameter is
        optional.

Return Value:

    Status code.

--*/

{

    ULONG Flags;
    PIMAGE_SECTION_LIST ImageSectionList;
    KSTATUS Status;

    //
    // The page cache entry shouldn't be referenced by random I/O buffers
    // because they could add mappings after this work is done. A reference
    // count of 1 is accepted for the caller's own reference, which isn't
    // doing anything wild with it.
    //

    ASSERT(PageCacheEntry->ReferenceCount <= 1);

    Status = STATUS_SUCCESS;
    if (PageWasDirty != NULL) {
        *PageWasDirty = FALSE;
    }

    ImageSectionList = PageCacheEntry->FileObject->ImageSectionList;
    if (ImageSectionList != NULL) {
        Flags = IMAGE_SECTION_UNMAP_FLAG_PAGE_CACHE_ONLY;
        Status = MmUnmapImageSectionList(ImageSectionList,
                                         PageCacheEntry->Offset,
                                         MmPageSize(),
                                         Flags,
                                         PageWasDirty);
    }

    return Status;
}

KSTATUS
IopRemovePageCacheEntryVirtualAddress (
    PPAGE_CACHE_ENTRY Entry,
    PVOID *VirtualAddress
    )

/*++

Routine Description:

    This routine attempts to separate a page cache entry from its associated
    virtual address. It assumes the file object lock for this entry (but not
    the backing entry if there is one) is held.

Arguments:

    Entry - Supplies a pointer to the page cache entry.

    VirtualAddress - Supplies a pointer where the virtual address to unmap
        will be returned on success. NULL will be returned on failure or if
        there was no VA.

Return Value:

    STATUS_SUCCESS on success.

    STATUS_RESOURCE_IN_USE if the page cache entry has references and cannot
    be unmapped.

--*/

{

    PPAGE_CACHE_ENTRY BackingEntry;
    ULONG OldFlags;
    KSTATUS Status;

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(Entry->FileObject->Lock));

    Status = STATUS_RESOURCE_IN_USE;
    *VirtualAddress = NULL;
    BackingEntry = NULL;
    if ((Entry->ReferenceCount != 1) ||
        ((Entry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0)) {

        goto RemovePageCacheEntryVirtualAddressEnd;
    }

    //
    // If this page cache entry owns the physical page, then it is not
    // serving as a backing entry to any other page cache entry (as it has
    // no references). Freely unmap it.
    //

    if ((Entry->Flags & PAGE_CACHE_ENTRY_FLAG_PAGE_OWNER) != 0) {
        OldFlags = RtlAtomicAnd32(&(Entry->Flags),
                                  ~PAGE_CACHE_ENTRY_FLAG_MAPPED);

    //
    // The page cache entry is not the owner, but it may be eligible for
    // unmap if the owner only has 1 reference (from the backee).
    //

    } else {

        //
        // Grab the backing entry lock, too. Lock ordering shouldn't be a
        // problem since files are always grabbed before block devices.
        //

        BackingEntry = Entry->BackingEntry;

        ASSERT(BackingEntry != NULL);

        KeAcquireSharedExclusiveLockExclusive(BackingEntry->FileObject->Lock);

        ASSERT((Entry->VirtualAddress == NULL) ||
               (BackingEntry->VirtualAddress == Entry->VirtualAddress));

        if ((BackingEntry->ReferenceCount != 1) ||
            ((BackingEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0)) {

            goto RemovePageCacheEntryVirtualAddressEnd;
        }

        //
        // Only the owner should be marked mapped or dirty.
        //

        ASSERT((Entry->Flags &
                (PAGE_CACHE_ENTRY_FLAG_MAPPED |
                 PAGE_CACHE_ENTRY_FLAG_DIRTY)) == 0);

        OldFlags = RtlAtomicAnd32(&(BackingEntry->Flags),
                                  ~PAGE_CACHE_ENTRY_FLAG_MAPPED);
    }

    if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_MAPPED) != 0) {
        if (BackingEntry != NULL) {
            *VirtualAddress = BackingEntry->VirtualAddress;
            BackingEntry->VirtualAddress = NULL;

        } else {
            *VirtualAddress = Entry->VirtualAddress;
        }

        Entry->VirtualAddress = NULL;

        //
        // If the unmapped page was also dirty, decrement the count. The
        // mapped page count is not decremented because it's assumed the
        // caller will do that (potentially in bulk).
        //

        if ((OldFlags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
            RtlAtomicAdd(&IoPageCacheMappedDirtyPageCount, (UINTN)-1);
        }
    }

    Status = STATUS_SUCCESS;

RemovePageCacheEntryVirtualAddressEnd:
    if (BackingEntry != NULL) {
        KeReleaseSharedExclusiveLockExclusive(BackingEntry->FileObject->Lock);
    }

    return Status;
}

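//
// Illustrative sketch: the caller-side bookkeeping contract of the routine
// above. A hypothetical one-off caller unmaps the returned VA and decrements
// the mapped page count itself; IopTrimPageCacheVirtual is the real, batched
// version of this pattern.
//
//     KeAcquireSharedExclusiveLockExclusive(FileObject->Lock);
//     IopRemovePageCacheEntryVirtualAddress(Entry, &VirtualAddress);
//     KeReleaseSharedExclusiveLockExclusive(FileObject->Lock);
//     if (VirtualAddress != NULL) {
//         MmUnmapAddress(VirtualAddress, MmPageSize());
//         RtlAtomicAdd(&IoPageCacheMappedPageCount, (UINTN)-1);
//     }
//
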
VOID
IopRemovePageCacheEntryFromTree (
    PPAGE_CACHE_ENTRY PageCacheEntry
    )

/*++

Routine Description:

    This routine removes a page cache entry from the page cache tree. This
    routine assumes that the page cache's tree lock is held exclusively.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry to be removed.

Return Value:

    None.

--*/

{

    ASSERT(KeIsSharedExclusiveLockHeldExclusive(
                                            PageCacheEntry->FileObject->Lock));

    ASSERT(PageCacheEntry->Node.Parent != NULL);

    //
    // If a backing entry exists, then MM needs to know that the backing entry
    // now owns the page. It may have always been the owner, but just make
    // sure.
    //

    if (PageCacheEntry->BackingEntry != NULL) {
        MmSetPageCacheEntryForPhysicalAddress(PageCacheEntry->PhysicalAddress,
                                              PageCacheEntry->BackingEntry);
    }

    RtlRedBlackTreeRemove(&(PageCacheEntry->FileObject->PageCacheTree),
                          &(PageCacheEntry->Node));

    PageCacheEntry->Node.Parent = NULL;
    IoPageCacheEntryCount -= 1;
    if ((IoPageCacheDebugFlags & PAGE_CACHE_DEBUG_EVICTION) != 0) {
        RtlDebugPrint("PAGE CACHE: Remove entry 0x%08x: file object "
                      "0x%08x, offset 0x%I64x, physical address "
                      "0x%I64x, reference count %d, flags 0x%08x.\n",
                      PageCacheEntry,
                      PageCacheEntry->FileObject,
                      PageCacheEntry->Offset,
                      PageCacheEntry->PhysicalAddress,
                      PageCacheEntry->ReferenceCount,
                      PageCacheEntry->Flags);
    }

    return;
}

VOID
IopUpdatePageCacheEntryList (
    PPAGE_CACHE_ENTRY PageCacheEntry,
    BOOL Created
    )

/*++

Routine Description:

    This routine updates a page cache entry's list entry by putting it on the
    appropriate list. This should be used when a page cache entry is looked up
    or when it is created.

Arguments:

    PageCacheEntry - Supplies a pointer to the page cache entry whose list
        entry needs to be updated.

    Created - Supplies a boolean indicating if the page cache entry was just
        created or not.

Return Value:

    None.

--*/

{

    KeAcquireQueuedLock(IoPageCacheListLock);

    //
    // If the page cache entry is not new, then it might already be on a
    // list. If it's on a clean list, move it to the back. If it's clean
    // and not on a list, then it probably got ripped off the list because
    // there are references on it.
    //

    if (Created == FALSE) {

        //
        // If it's dirty, it should always be on the dirty list.
        //

        ASSERT(((PageCacheEntry->Flags &
                 PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) ||
               (PageCacheEntry->ListEntry.Next != NULL));

        if (((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0) &&
            (PageCacheEntry->ListEntry.Next != NULL)) {

            LIST_REMOVE(&(PageCacheEntry->ListEntry));
            INSERT_BEFORE(&(PageCacheEntry->ListEntry),
                          &IoPageCacheCleanList);
        }

    //
    // New pages do not start on a list. Stick it on the back of the clean
    // list.
    //

    } else {

        ASSERT(PageCacheEntry->ListEntry.Next == NULL);
        ASSERT((PageCacheEntry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) == 0);

        INSERT_BEFORE(&(PageCacheEntry->ListEntry), &IoPageCacheCleanList);
    }

    KeReleaseQueuedLock(IoPageCacheListLock);
    return;
}

BOOL
IopIsPageCacheTooBig (
    PUINTN FreePhysicalPages
    )

/*++

Routine Description:

    This routine determines if the page cache is too large given current
    memory constraints.

Arguments:

    FreePhysicalPages - Supplies an optional pointer where the number of free
        physical pages used at the time of computation will be returned. This
        will only be returned if the page cache is reported to be too big.

Return Value:

    TRUE if the page cache is too big and should shrink.

    FALSE if the page cache is too small or just right.

--*/

{

    UINTN FreePages;

    //
    // Don't let the page cache shrink too much. If it's already at or below
    // the minimum number of pages, then it is never considered too big.
    //

    if (IoPageCachePhysicalPageCount <= IoPageCacheMinimumPages) {
        return FALSE;
    }

    //
    // Get the current number of free pages in the system, and determine if
    // the page cache still has room to grow.
    //

    FreePages = MmGetTotalFreePhysicalPages();
    if (FreePages > IoPageCacheHeadroomPagesTrigger) {
        return FALSE;
    }

    if (FreePhysicalPages != NULL) {
        *FreePhysicalPages = FreePages;
    }

    return TRUE;
}

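//
// Illustrative sketch: how a hypothetical size-management pass would turn
// the check above into a removal target, shrinking back toward a headroom
// retreat mark. IoPageCacheHeadroomPagesRetreat is assumed here as the
// physical-memory companion to the trigger constant used above.
//
//     if (IopIsPageCacheTooBig(&FreePages) != FALSE) {
//         TargetRemoveCount = IoPageCacheHeadroomPagesRetreat - FreePages;
//
//         //
//         // Clip the target so the cache never shrinks below its minimum.
//         //
//
//         if (TargetRemoveCount > IoPageCachePhysicalPageCount -
//                                 IoPageCacheMinimumPages) {
//
//             TargetRemoveCount = IoPageCachePhysicalPageCount -
//                                 IoPageCacheMinimumPages;
//         }
//     }
//
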
BOOL
IopIsPageCacheTooMapped (
    PUINTN FreeVirtualPages
    )

/*++

Routine Description:

    This routine determines if the page cache has too many mapped entries
    given current memory constraints.

Arguments:

    FreeVirtualPages - Supplies an optional pointer where the number of free
        virtual pages at the time of computation will be returned. This will
        only be returned if the page cache is determined to have too many
        entries mapped.

Return Value:

    TRUE if the page cache has too many mapped entries and some should be
    unmapped.

    FALSE if the page cache does not have too many entries mapped.

--*/

{

    UINTN FreePages;

    //
    // Get the current number of free virtual pages in system memory and
    // determine if the page cache still has room to grow.
    //

    FreePages = MmGetFreeVirtualMemory() >> MmPageShift();
    if ((FreePages > IoPageCacheHeadroomVirtualPagesTrigger) &&
        (MmGetVirtualMemoryWarningLevel() == MemoryWarningLevelNone)) {

        return FALSE;
    }

    //
    // Check to make sure at least a single page cache entry is mapped.
    //

    if (IoPageCacheMappedPageCount == 0) {
        return FALSE;
    }

    if (FreeVirtualPages != NULL) {
        *FreeVirtualPages = FreePages;
    }

    return TRUE;
}

VOID
IopCheckFileObjectPageCache (
    PFILE_OBJECT FileObject
    )

/*++

Routine Description:

    This routine checks the given file object's page cache for consistency.

Arguments:

    FileObject - Supplies a pointer to the file object to check.

Return Value:

    None.

--*/

{

    PLIST_ENTRY CurrentEntry;
    PPAGE_CACHE_ENTRY Entry;
    PRED_BLACK_TREE_NODE TreeNode;

    ASSERT(KeIsSharedExclusiveLockHeld(FileObject->Lock));

    KeAcquireQueuedLock(IoPageCacheListLock);
    TreeNode = RtlRedBlackTreeGetLowestNode(&(FileObject->PageCacheTree));
    while (TreeNode != NULL) {
        Entry = RED_BLACK_TREE_VALUE(TreeNode, PAGE_CACHE_ENTRY, Node);
        if ((Entry->Flags & PAGE_CACHE_ENTRY_FLAG_DIRTY) != 0) {
            if (Entry->ListEntry.Next == NULL) {
                RtlDebugPrint("PAGE_CACHE_ENTRY 0x%x for FILE_OBJECT 0x%x "
                              "Offset 0x%I64x dirty but not in list.\n",
                              Entry,
                              FileObject,
                              Entry->Offset);

            } else {
                CurrentEntry = FileObject->DirtyPageList.Next;
                while ((CurrentEntry != &(FileObject->DirtyPageList)) &&
                       (CurrentEntry != &(Entry->ListEntry))) {

                    CurrentEntry = CurrentEntry->Next;
                }

                if (CurrentEntry != &(Entry->ListEntry)) {
                    RtlDebugPrint("PAGE_CACHE_ENTRY 0x%x for FILE_OBJECT 0x%x "
                                  "Offset 0x%I64x dirty but not in dirty "
                                  "list.\n",
                                  Entry,
                                  FileObject,
                                  Entry->Offset);
                }
            }
        }

        TreeNode = RtlRedBlackTreeGetNextNode(&(FileObject->PageCacheTree),
                                              FALSE,
                                              TreeNode);
    }

    KeReleaseQueuedLock(IoPageCacheListLock);
    return;
}