001-fix_make_headers_install.patch

From 5eac4d66049ab7d14a2b7311610c8cb85a2c1bf1 Mon Sep 17 00:00:00 2001
From: Nicolas Thill <nico@openwrt.org>
Date: Fri, 20 Mar 2015 00:31:06 +0100
Subject: [PATCH] UM: fix make headers_install after UAPI header installation

Signed-off-by: Nicolas Thill <nico@openwrt.org>
---
From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
From: Florian Fainelli <florian@openwrt.org>
Date: Sun, 17 Mar 2013 20:12:10 +0100
Subject: [PATCH] UM: fix make headers_install after UAPI header installation

Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
header installation and checking) breaks UML make headers_install with
the following:

$ ARCH=um make headers_install
  CHK     include/generated/uapi/linux/version.h
  UPD     include/generated/uapi/linux/version.h
  HOSTCC  scripts/basic/fixdep
  WRAP    arch/um/include/generated/asm/bug.h
[snip]
  WRAP    arch/um/include/generated/asm/trace_clock.h
  SYSHDR  arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
  SYSHDR  arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
  SYSHDR  arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
  SYSTBL  arch/x86/syscalls/../include/generated/asm/syscalls_32.h
  HOSTCC  scripts/unifdef
Makefile:912: *** Headers not exportable for the um architecture. Stop.
zsh: exit 2 ARCH=um make headers_install
The reason is that the top-level Makefile does the following:

$(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
  $(error Headers not exportable for the $(SRCARCH) architecture))

We end up in the else part of the $(if) statement because UML still uses
the old path in arch/um/include/asm/Kbuild. This patch fixes the issue
by moving the header files to arch/um/include/uapi/asm/, thus allowing
headers_install (and other make targets checking for uapi) to succeed.

Signed-off-by: Florian Fainelli <florian@openwrt.org>
---
Richard, this has been broken from 3.7 onwards; if you want me to send
you separate patches for 3.7 and 3.8, let me know. Thanks!
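
For anyone reproducing the failure outside a full kernel build, the
$(if)/$(wildcard) check quoted above can be exercised on its own. The
following is a minimal standalone sketch, not the kernel's actual
Makefile: srctree, hdr-arch and SRCARCH are given dummy values here for
illustration (the real build derives them from the tree and ARCH=um),
and recipe lines must be tab-indented.

# Minimal sketch of the top-level Makefile's exportability check.
# srctree/hdr-arch/SRCARCH values below are illustrative assumptions;
# the kernel build system normally computes them.
srctree  := .
hdr-arch := um
SRCARCH  := um

check-headers:
	$(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
	  $(error Headers not exportable for the $(SRCARCH) architecture))
	@echo "arch/$(hdr-arch)/include/uapi/asm/Kbuild found, headers exportable"

Before this patch, $(wildcard) expands to nothing for um because the
Kbuild file still lives under arch/um/include/asm/, so the $(error)
branch fires with the same message as the Makefile:912 failure above;
once the headers live under arch/um/include/uapi/asm/, the check passes.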
  41. --- a/arch/um/include/asm/Kbuild
  42. +++ /dev/null
  43. @@ -1,30 +0,0 @@
  44. -generic-y += barrier.h
  45. -generic-y += bug.h
  46. -generic-y += clkdev.h
  47. -generic-y += cputime.h
  48. -generic-y += current.h
  49. -generic-y += delay.h
  50. -generic-y += device.h
  51. -generic-y += emergency-restart.h
  52. -generic-y += exec.h
  53. -generic-y += ftrace.h
  54. -generic-y += futex.h
  55. -generic-y += hardirq.h
  56. -generic-y += hash.h
  57. -generic-y += hw_irq.h
  58. -generic-y += io.h
  59. -generic-y += irq_regs.h
  60. -generic-y += irq_work.h
  61. -generic-y += kdebug.h
  62. -generic-y += mcs_spinlock.h
  63. -generic-y += mutex.h
  64. -generic-y += param.h
  65. -generic-y += pci.h
  66. -generic-y += percpu.h
  67. -generic-y += preempt.h
  68. -generic-y += scatterlist.h
  69. -generic-y += sections.h
  70. -generic-y += switch_to.h
  71. -generic-y += topology.h
  72. -generic-y += trace_clock.h
  73. -generic-y += xor.h
  74. --- a/arch/um/include/asm/a.out-core.h
  75. +++ /dev/null
  76. @@ -1,27 +0,0 @@
  77. -/* a.out coredump register dumper
  78. - *
  79. - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  80. - * Written by David Howells (dhowells@redhat.com)
  81. - *
  82. - * This program is free software; you can redistribute it and/or
  83. - * modify it under the terms of the GNU General Public Licence
  84. - * as published by the Free Software Foundation; either version
  85. - * 2 of the Licence, or (at your option) any later version.
  86. - */
  87. -
  88. -#ifndef __UM_A_OUT_CORE_H
  89. -#define __UM_A_OUT_CORE_H
  90. -
  91. -#ifdef __KERNEL__
  92. -
  93. -#include <linux/user.h>
  94. -
  95. -/*
  96. - * fill in the user structure for an a.out core dump
  97. - */
  98. -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
  99. -{
  100. -}
  101. -
  102. -#endif /* __KERNEL__ */
  103. -#endif /* __UM_A_OUT_CORE_H */
  104. --- a/arch/um/include/asm/bugs.h
  105. +++ /dev/null
  106. @@ -1,6 +0,0 @@
  107. -#ifndef __UM_BUGS_H
  108. -#define __UM_BUGS_H
  109. -
  110. -void check_bugs(void);
  111. -
  112. -#endif
  113. --- a/arch/um/include/asm/cache.h
  114. +++ /dev/null
  115. @@ -1,17 +0,0 @@
  116. -#ifndef __UM_CACHE_H
  117. -#define __UM_CACHE_H
  118. -
  119. -
  120. -#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
  121. -# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
  122. -#elif defined(CONFIG_UML_X86) /* 64-bit */
  123. -# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
  124. -#else
  125. -/* XXX: this was taken from x86, now it's completely random. Luckily only
  126. - * affects SMP padding. */
  127. -# define L1_CACHE_SHIFT 5
  128. -#endif
  129. -
  130. -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
  131. -
  132. -#endif
  133. --- a/arch/um/include/asm/common.lds.S
  134. +++ /dev/null
  135. @@ -1,107 +0,0 @@
  136. -#include <asm-generic/vmlinux.lds.h>
  137. -
  138. - .fini : { *(.fini) } =0x9090
  139. - _etext = .;
  140. - PROVIDE (etext = .);
  141. -
  142. - . = ALIGN(4096);
  143. - _sdata = .;
  144. - PROVIDE (sdata = .);
  145. -
  146. - RODATA
  147. -
  148. - .unprotected : { *(.unprotected) }
  149. - . = ALIGN(4096);
  150. - PROVIDE (_unprotected_end = .);
  151. -
  152. - . = ALIGN(4096);
  153. - .note : { *(.note.*) }
  154. - EXCEPTION_TABLE(0)
  155. -
  156. - BUG_TABLE
  157. -
  158. - .uml.setup.init : {
  159. - __uml_setup_start = .;
  160. - *(.uml.setup.init)
  161. - __uml_setup_end = .;
  162. - }
  163. -
  164. - .uml.help.init : {
  165. - __uml_help_start = .;
  166. - *(.uml.help.init)
  167. - __uml_help_end = .;
  168. - }
  169. -
  170. - .uml.postsetup.init : {
  171. - __uml_postsetup_start = .;
  172. - *(.uml.postsetup.init)
  173. - __uml_postsetup_end = .;
  174. - }
  175. -
  176. - .init.setup : {
  177. - INIT_SETUP(0)
  178. - }
  179. -
  180. - PERCPU_SECTION(32)
  181. -
  182. - .initcall.init : {
  183. - INIT_CALLS
  184. - }
  185. -
  186. - .con_initcall.init : {
  187. - CON_INITCALL
  188. - }
  189. -
  190. - .uml.initcall.init : {
  191. - __uml_initcall_start = .;
  192. - *(.uml.initcall.init)
  193. - __uml_initcall_end = .;
  194. - }
  195. -
  196. - SECURITY_INIT
  197. -
  198. - .exitcall : {
  199. - __exitcall_begin = .;
  200. - *(.exitcall.exit)
  201. - __exitcall_end = .;
  202. - }
  203. -
  204. - .uml.exitcall : {
  205. - __uml_exitcall_begin = .;
  206. - *(.uml.exitcall.exit)
  207. - __uml_exitcall_end = .;
  208. - }
  209. -
  210. - . = ALIGN(4);
  211. - .altinstructions : {
  212. - __alt_instructions = .;
  213. - *(.altinstructions)
  214. - __alt_instructions_end = .;
  215. - }
  216. - .altinstr_replacement : { *(.altinstr_replacement) }
  217. - /* .exit.text is discard at runtime, not link time, to deal with references
  218. - from .altinstructions and .eh_frame */
  219. - .exit.text : { *(.exit.text) }
  220. - .exit.data : { *(.exit.data) }
  221. -
  222. - .preinit_array : {
  223. - __preinit_array_start = .;
  224. - *(.preinit_array)
  225. - __preinit_array_end = .;
  226. - }
  227. - .init_array : {
  228. - __init_array_start = .;
  229. - *(.init_array)
  230. - __init_array_end = .;
  231. - }
  232. - .fini_array : {
  233. - __fini_array_start = .;
  234. - *(.fini_array)
  235. - __fini_array_end = .;
  236. - }
  237. -
  238. - . = ALIGN(4096);
  239. - .init.ramfs : {
  240. - INIT_RAM_FS
  241. - }
  242. -
  243. --- a/arch/um/include/asm/dma.h
  244. +++ /dev/null
  245. @@ -1,10 +0,0 @@
  246. -#ifndef __UM_DMA_H
  247. -#define __UM_DMA_H
  248. -
  249. -#include <asm/io.h>
  250. -
  251. -extern unsigned long uml_physmem;
  252. -
  253. -#define MAX_DMA_ADDRESS (uml_physmem)
  254. -
  255. -#endif
  256. --- a/arch/um/include/asm/fixmap.h
  257. +++ /dev/null
  258. @@ -1,60 +0,0 @@
  259. -#ifndef __UM_FIXMAP_H
  260. -#define __UM_FIXMAP_H
  261. -
  262. -#include <asm/processor.h>
  263. -#include <asm/kmap_types.h>
  264. -#include <asm/archparam.h>
  265. -#include <asm/page.h>
  266. -#include <linux/threads.h>
  267. -
  268. -/*
  269. - * Here we define all the compile-time 'special' virtual
  270. - * addresses. The point is to have a constant address at
  271. - * compile time, but to set the physical address only
  272. - * in the boot process. We allocate these special addresses
  273. - * from the end of virtual memory (0xfffff000) backwards.
  274. - * Also this lets us do fail-safe vmalloc(), we
  275. - * can guarantee that these special addresses and
  276. - * vmalloc()-ed addresses never overlap.
  277. - *
  278. - * these 'compile-time allocated' memory buffers are
  279. - * fixed-size 4k pages. (or larger if used with an increment
  280. - * highger than 1) use fixmap_set(idx,phys) to associate
  281. - * physical memory with fixmap indices.
  282. - *
  283. - * TLB entries of such buffers will not be flushed across
  284. - * task switches.
  285. - */
  286. -
  287. -/*
  288. - * on UP currently we will have no trace of the fixmap mechanizm,
  289. - * no page table allocations, etc. This might change in the
  290. - * future, say framebuffers for the console driver(s) could be
  291. - * fix-mapped?
  292. - */
  293. -enum fixed_addresses {
  294. -#ifdef CONFIG_HIGHMEM
  295. - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
  296. - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
  297. -#endif
  298. - __end_of_fixed_addresses
  299. -};
  300. -
  301. -extern void __set_fixmap (enum fixed_addresses idx,
  302. - unsigned long phys, pgprot_t flags);
  303. -
  304. -/*
  305. - * used by vmalloc.c.
  306. - *
  307. - * Leave one empty page between vmalloc'ed areas and
  308. - * the start of the fixmap, and leave one page empty
  309. - * at the top of mem..
  310. - */
  311. -
  312. -#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
  313. -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
  314. -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
  315. -
  316. -#include <asm-generic/fixmap.h>
  317. -
  318. -#endif
  319. --- a/arch/um/include/asm/irq.h
  320. +++ /dev/null
  321. @@ -1,23 +0,0 @@
  322. -#ifndef __UM_IRQ_H
  323. -#define __UM_IRQ_H
  324. -
  325. -#define TIMER_IRQ 0
  326. -#define UMN_IRQ 1
  327. -#define CONSOLE_IRQ 2
  328. -#define CONSOLE_WRITE_IRQ 3
  329. -#define UBD_IRQ 4
  330. -#define UM_ETH_IRQ 5
  331. -#define SSL_IRQ 6
  332. -#define SSL_WRITE_IRQ 7
  333. -#define ACCEPT_IRQ 8
  334. -#define MCONSOLE_IRQ 9
  335. -#define WINCH_IRQ 10
  336. -#define SIGIO_WRITE_IRQ 11
  337. -#define TELNETD_IRQ 12
  338. -#define XTERM_IRQ 13
  339. -#define RANDOM_IRQ 14
  340. -
  341. -#define LAST_IRQ RANDOM_IRQ
  342. -#define NR_IRQS (LAST_IRQ + 1)
  343. -
  344. -#endif
  345. --- a/arch/um/include/asm/irqflags.h
  346. +++ /dev/null
  347. @@ -1,42 +0,0 @@
  348. -#ifndef __UM_IRQFLAGS_H
  349. -#define __UM_IRQFLAGS_H
  350. -
  351. -extern int get_signals(void);
  352. -extern int set_signals(int enable);
  353. -extern void block_signals(void);
  354. -extern void unblock_signals(void);
  355. -
  356. -static inline unsigned long arch_local_save_flags(void)
  357. -{
  358. - return get_signals();
  359. -}
  360. -
  361. -static inline void arch_local_irq_restore(unsigned long flags)
  362. -{
  363. - set_signals(flags);
  364. -}
  365. -
  366. -static inline void arch_local_irq_enable(void)
  367. -{
  368. - unblock_signals();
  369. -}
  370. -
  371. -static inline void arch_local_irq_disable(void)
  372. -{
  373. - block_signals();
  374. -}
  375. -
  376. -static inline unsigned long arch_local_irq_save(void)
  377. -{
  378. - unsigned long flags;
  379. - flags = arch_local_save_flags();
  380. - arch_local_irq_disable();
  381. - return flags;
  382. -}
  383. -
  384. -static inline bool arch_irqs_disabled(void)
  385. -{
  386. - return arch_local_save_flags() == 0;
  387. -}
  388. -
  389. -#endif
  390. --- a/arch/um/include/asm/kmap_types.h
  391. +++ /dev/null
  392. @@ -1,13 +0,0 @@
  393. -/*
  394. - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  395. - * Licensed under the GPL
  396. - */
  397. -
  398. -#ifndef __UM_KMAP_TYPES_H
  399. -#define __UM_KMAP_TYPES_H
  400. -
  401. -/* No more #include "asm/arch/kmap_types.h" ! */
  402. -
  403. -#define KM_TYPE_NR 14
  404. -
  405. -#endif
  406. --- a/arch/um/include/asm/kvm_para.h
  407. +++ /dev/null
  408. @@ -1 +0,0 @@
  409. -#include <asm-generic/kvm_para.h>
  410. --- a/arch/um/include/asm/mmu.h
  411. +++ /dev/null
  412. @@ -1,24 +0,0 @@
  413. -/*
  414. - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  415. - * Licensed under the GPL
  416. - */
  417. -
  418. -#ifndef __ARCH_UM_MMU_H
  419. -#define __ARCH_UM_MMU_H
  420. -
  421. -#include <mm_id.h>
  422. -#include <asm/mm_context.h>
  423. -
  424. -typedef struct mm_context {
  425. - struct mm_id id;
  426. - struct uml_arch_mm_context arch;
  427. - struct page *stub_pages[2];
  428. -} mm_context_t;
  429. -
  430. -extern void __switch_mm(struct mm_id * mm_idp);
  431. -
  432. -/* Avoid tangled inclusion with asm/ldt.h */
  433. -extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
  434. -extern void free_ldt(struct mm_context *mm);
  435. -
  436. -#endif
  437. --- a/arch/um/include/asm/mmu_context.h
  438. +++ /dev/null
  439. @@ -1,58 +0,0 @@
  440. -/*
  441. - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  442. - * Licensed under the GPL
  443. - */
  444. -
  445. -#ifndef __UM_MMU_CONTEXT_H
  446. -#define __UM_MMU_CONTEXT_H
  447. -
  448. -#include <linux/sched.h>
  449. -#include <asm/mmu.h>
  450. -
  451. -extern void uml_setup_stubs(struct mm_struct *mm);
  452. -extern void arch_exit_mmap(struct mm_struct *mm);
  453. -
  454. -#define deactivate_mm(tsk,mm) do { } while (0)
  455. -
  456. -extern void force_flush_all(void);
  457. -
  458. -static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
  459. -{
  460. - /*
  461. - * This is called by fs/exec.c and sys_unshare()
  462. - * when the new ->mm is used for the first time.
  463. - */
  464. - __switch_mm(&new->context.id);
  465. - down_write(&new->mmap_sem);
  466. - uml_setup_stubs(new);
  467. - up_write(&new->mmap_sem);
  468. -}
  469. -
  470. -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  471. - struct task_struct *tsk)
  472. -{
  473. - unsigned cpu = smp_processor_id();
  474. -
  475. - if(prev != next){
  476. - cpumask_clear_cpu(cpu, mm_cpumask(prev));
  477. - cpumask_set_cpu(cpu, mm_cpumask(next));
  478. - if(next != &init_mm)
  479. - __switch_mm(&next->context.id);
  480. - }
  481. -}
  482. -
  483. -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
  484. -{
  485. - uml_setup_stubs(mm);
  486. -}
  487. -
  488. -static inline void enter_lazy_tlb(struct mm_struct *mm,
  489. - struct task_struct *tsk)
  490. -{
  491. -}
  492. -
  493. -extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
  494. -
  495. -extern void destroy_context(struct mm_struct *mm);
  496. -
  497. -#endif
  498. --- a/arch/um/include/asm/page.h
  499. +++ /dev/null
  500. @@ -1,127 +0,0 @@
  501. -/*
  502. - * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
  503. - * Copyright 2003 PathScale, Inc.
  504. - * Licensed under the GPL
  505. - */
  506. -
  507. -#ifndef __UM_PAGE_H
  508. -#define __UM_PAGE_H
  509. -
  510. -#include <linux/const.h>
  511. -
  512. -/* PAGE_SHIFT determines the page size */
  513. -#define PAGE_SHIFT 12
  514. -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
  515. -#define PAGE_MASK (~(PAGE_SIZE-1))
  516. -
  517. -#ifndef __ASSEMBLY__
  518. -
  519. -struct page;
  520. -
  521. -#include <linux/types.h>
  522. -#include <asm/vm-flags.h>
  523. -
  524. -/*
  525. - * These are used to make use of C type-checking..
  526. - */
  527. -
  528. -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
  529. -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
  530. -
  531. -#define clear_user_page(page, vaddr, pg) clear_page(page)
  532. -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
  533. -
  534. -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
  535. -
  536. -typedef struct { unsigned long pte_low, pte_high; } pte_t;
  537. -typedef struct { unsigned long pmd; } pmd_t;
  538. -typedef struct { unsigned long pgd; } pgd_t;
  539. -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
  540. -
  541. -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
  542. -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
  543. -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
  544. -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
  545. - smp_wmb(); \
  546. - (to).pte_low = (from).pte_low; })
  547. -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
  548. -#define pte_set_val(pte, phys, prot) \
  549. - ({ (pte).pte_high = (phys) >> 32; \
  550. - (pte).pte_low = (phys) | pgprot_val(prot); })
  551. -
  552. -#define pmd_val(x) ((x).pmd)
  553. -#define __pmd(x) ((pmd_t) { (x) } )
  554. -
  555. -typedef unsigned long long pfn_t;
  556. -typedef unsigned long long phys_t;
  557. -
  558. -#else
  559. -
  560. -typedef struct { unsigned long pte; } pte_t;
  561. -typedef struct { unsigned long pgd; } pgd_t;
  562. -
  563. -#ifdef CONFIG_3_LEVEL_PGTABLES
  564. -typedef struct { unsigned long pmd; } pmd_t;
  565. -#define pmd_val(x) ((x).pmd)
  566. -#define __pmd(x) ((pmd_t) { (x) } )
  567. -#endif
  568. -
  569. -#define pte_val(x) ((x).pte)
  570. -
  571. -
  572. -#define pte_get_bits(p, bits) ((p).pte & (bits))
  573. -#define pte_set_bits(p, bits) ((p).pte |= (bits))
  574. -#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
  575. -#define pte_copy(to, from) ((to).pte = (from).pte)
  576. -#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
  577. -#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
  578. -
  579. -typedef unsigned long pfn_t;
  580. -typedef unsigned long phys_t;
  581. -
  582. -#endif
  583. -
  584. -typedef struct { unsigned long pgprot; } pgprot_t;
  585. -
  586. -typedef struct page *pgtable_t;
  587. -
  588. -#define pgd_val(x) ((x).pgd)
  589. -#define pgprot_val(x) ((x).pgprot)
  590. -
  591. -#define __pte(x) ((pte_t) { (x) } )
  592. -#define __pgd(x) ((pgd_t) { (x) } )
  593. -#define __pgprot(x) ((pgprot_t) { (x) } )
  594. -
  595. -extern unsigned long uml_physmem;
  596. -
  597. -#define PAGE_OFFSET (uml_physmem)
  598. -#define KERNELBASE PAGE_OFFSET
  599. -
  600. -#define __va_space (8*1024*1024)
  601. -
  602. -#include <mem.h>
  603. -
  604. -/* Cast to unsigned long before casting to void * to avoid a warning from
  605. - * mmap_kmem about cutting a long long down to a void *. Not sure that
  606. - * casting is the right thing, but 32-bit UML can't have 64-bit virtual
  607. - * addresses
  608. - */
  609. -#define __pa(virt) to_phys((void *) (unsigned long) (virt))
  610. -#define __va(phys) to_virt((unsigned long) (phys))
  611. -
  612. -#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
  613. -#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
  614. -
  615. -#define pfn_valid(pfn) ((pfn) < max_mapnr)
  616. -#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
  617. -
  618. -#include <asm-generic/memory_model.h>
  619. -#include <asm-generic/getorder.h>
  620. -
  621. -#endif /* __ASSEMBLY__ */
  622. -
  623. -#ifdef CONFIG_X86_32
  624. -#define __HAVE_ARCH_GATE_AREA 1
  625. -#endif
  626. -
  627. -#endif /* __UM_PAGE_H */
  628. --- a/arch/um/include/asm/pgalloc.h
  629. +++ /dev/null
  630. @@ -1,61 +0,0 @@
  631. -/*
  632. - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  633. - * Copyright 2003 PathScale, Inc.
  634. - * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
  635. - * Licensed under the GPL
  636. - */
  637. -
  638. -#ifndef __UM_PGALLOC_H
  639. -#define __UM_PGALLOC_H
  640. -
  641. -#include <linux/mm.h>
  642. -
  643. -#define pmd_populate_kernel(mm, pmd, pte) \
  644. - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
  645. -
  646. -#define pmd_populate(mm, pmd, pte) \
  647. - set_pmd(pmd, __pmd(_PAGE_TABLE + \
  648. - ((unsigned long long)page_to_pfn(pte) << \
  649. - (unsigned long long) PAGE_SHIFT)))
  650. -#define pmd_pgtable(pmd) pmd_page(pmd)
  651. -
  652. -/*
  653. - * Allocate and free page tables.
  654. - */
  655. -extern pgd_t *pgd_alloc(struct mm_struct *);
  656. -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  657. -
  658. -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
  659. -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
  660. -
  661. -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  662. -{
  663. - free_page((unsigned long) pte);
  664. -}
  665. -
  666. -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
  667. -{
  668. - pgtable_page_dtor(pte);
  669. - __free_page(pte);
  670. -}
  671. -
  672. -#define __pte_free_tlb(tlb,pte, address) \
  673. -do { \
  674. - pgtable_page_dtor(pte); \
  675. - tlb_remove_page((tlb),(pte)); \
  676. -} while (0)
  677. -
  678. -#ifdef CONFIG_3_LEVEL_PGTABLES
  679. -
  680. -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  681. -{
  682. - free_page((unsigned long)pmd);
  683. -}
  684. -
  685. -#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
  686. -#endif
  687. -
  688. -#define check_pgt_cache() do { } while (0)
  689. -
  690. -#endif
  691. -
  692. --- a/arch/um/include/asm/pgtable-2level.h
  693. +++ /dev/null
  694. @@ -1,53 +0,0 @@
  695. -/*
  696. - * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  697. - * Copyright 2003 PathScale, Inc.
  698. - * Derived from include/asm-i386/pgtable.h
  699. - * Licensed under the GPL
  700. - */
  701. -
  702. -#ifndef __UM_PGTABLE_2LEVEL_H
  703. -#define __UM_PGTABLE_2LEVEL_H
  704. -
  705. -#include <asm-generic/pgtable-nopmd.h>
  706. -
  707. -/* PGDIR_SHIFT determines what a third-level page table entry can map */
  708. -
  709. -#define PGDIR_SHIFT 22
  710. -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  711. -#define PGDIR_MASK (~(PGDIR_SIZE-1))
  712. -
  713. -/*
  714. - * entries per page directory level: the i386 is two-level, so
  715. - * we don't really have any PMD directory physically.
  716. - */
  717. -#define PTRS_PER_PTE 1024
  718. -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
  719. -#define PTRS_PER_PGD 1024
  720. -#define FIRST_USER_ADDRESS 0
  721. -
  722. -#define pte_ERROR(e) \
  723. - printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
  724. - pte_val(e))
  725. -#define pgd_ERROR(e) \
  726. - printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
  727. - pgd_val(e))
  728. -
  729. -static inline int pgd_newpage(pgd_t pgd) { return 0; }
  730. -static inline void pgd_mkuptodate(pgd_t pgd) { }
  731. -
  732. -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
  733. -
  734. -#define pte_pfn(x) phys_to_pfn(pte_val(x))
  735. -#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
  736. -#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
  737. -
  738. -/*
  739. - * Bits 0 through 4 are taken
  740. - */
  741. -#define PTE_FILE_MAX_BITS 27
  742. -
  743. -#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
  744. -
  745. -#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
  746. -
  747. -#endif
  748. --- a/arch/um/include/asm/pgtable-3level.h
  749. +++ /dev/null
  750. @@ -1,136 +0,0 @@
  751. -/*
  752. - * Copyright 2003 PathScale Inc
  753. - * Derived from include/asm-i386/pgtable.h
  754. - * Licensed under the GPL
  755. - */
  756. -
  757. -#ifndef __UM_PGTABLE_3LEVEL_H
  758. -#define __UM_PGTABLE_3LEVEL_H
  759. -
  760. -#include <asm-generic/pgtable-nopud.h>
  761. -
  762. -/* PGDIR_SHIFT determines what a third-level page table entry can map */
  763. -
  764. -#ifdef CONFIG_64BIT
  765. -#define PGDIR_SHIFT 30
  766. -#else
  767. -#define PGDIR_SHIFT 31
  768. -#endif
  769. -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  770. -#define PGDIR_MASK (~(PGDIR_SIZE-1))
  771. -
  772. -/* PMD_SHIFT determines the size of the area a second-level page table can
  773. - * map
  774. - */
  775. -
  776. -#define PMD_SHIFT 21
  777. -#define PMD_SIZE (1UL << PMD_SHIFT)
  778. -#define PMD_MASK (~(PMD_SIZE-1))
  779. -
  780. -/*
  781. - * entries per page directory level
  782. - */
  783. -
  784. -#define PTRS_PER_PTE 512
  785. -#ifdef CONFIG_64BIT
  786. -#define PTRS_PER_PMD 512
  787. -#define PTRS_PER_PGD 512
  788. -#else
  789. -#define PTRS_PER_PMD 1024
  790. -#define PTRS_PER_PGD 1024
  791. -#endif
  792. -
  793. -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
  794. -#define FIRST_USER_ADDRESS 0
  795. -
  796. -#define pte_ERROR(e) \
  797. - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  798. - pte_val(e))
  799. -#define pmd_ERROR(e) \
  800. - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  801. - pmd_val(e))
  802. -#define pgd_ERROR(e) \
  803. - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  804. - pgd_val(e))
  805. -
  806. -#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
  807. -#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  808. -#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
  809. -#define pud_populate(mm, pud, pmd) \
  810. - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
  811. -
  812. -#ifdef CONFIG_64BIT
  813. -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
  814. -#else
  815. -#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
  816. -#endif
  817. -
  818. -static inline int pgd_newpage(pgd_t pgd)
  819. -{
  820. - return(pgd_val(pgd) & _PAGE_NEWPAGE);
  821. -}
  822. -
  823. -static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
  824. -
  825. -#ifdef CONFIG_64BIT
  826. -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
  827. -#else
  828. -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
  829. -#endif
  830. -
  831. -struct mm_struct;
  832. -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
  833. -
  834. -static inline void pud_clear (pud_t *pud)
  835. -{
  836. - set_pud(pud, __pud(_PAGE_NEWPAGE));
  837. -}
  838. -
  839. -#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
  840. -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
  841. -
  842. -/* Find an entry in the second-level page table.. */
  843. -#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
  844. - pmd_index(address))
  845. -
  846. -static inline unsigned long pte_pfn(pte_t pte)
  847. -{
  848. - return phys_to_pfn(pte_val(pte));
  849. -}
  850. -
  851. -static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
  852. -{
  853. - pte_t pte;
  854. - phys_t phys = pfn_to_phys(page_nr);
  855. -
  856. - pte_set_val(pte, phys, pgprot);
  857. - return pte;
  858. -}
  859. -
  860. -static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
  861. -{
  862. - return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
  863. -}
  864. -
  865. -/*
  866. - * Bits 0 through 3 are taken in the low part of the pte,
  867. - * put the 32 bits of offset into the high part.
  868. - */
  869. -#define PTE_FILE_MAX_BITS 32
  870. -
  871. -#ifdef CONFIG_64BIT
  872. -
  873. -#define pte_to_pgoff(p) ((p).pte >> 32)
  874. -
  875. -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
  876. -
  877. -#else
  878. -
  879. -#define pte_to_pgoff(pte) ((pte).pte_high)
  880. -
  881. -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
  882. -
  883. -#endif
  884. -
  885. -#endif
  886. -
  887. --- a/arch/um/include/asm/pgtable.h
  888. +++ /dev/null
  889. @@ -1,375 +0,0 @@
  890. -/*
  891. - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  892. - * Copyright 2003 PathScale, Inc.
  893. - * Derived from include/asm-i386/pgtable.h
  894. - * Licensed under the GPL
  895. - */
  896. -
  897. -#ifndef __UM_PGTABLE_H
  898. -#define __UM_PGTABLE_H
  899. -
  900. -#include <asm/fixmap.h>
  901. -
  902. -#define _PAGE_PRESENT 0x001
  903. -#define _PAGE_NEWPAGE 0x002
  904. -#define _PAGE_NEWPROT 0x004
  905. -#define _PAGE_RW 0x020
  906. -#define _PAGE_USER 0x040
  907. -#define _PAGE_ACCESSED 0x080
  908. -#define _PAGE_DIRTY 0x100
  909. -/* If _PAGE_PRESENT is clear, we use these: */
  910. -#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
  911. -#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
  912. - pte_present gives true */
  913. -
  914. -#ifdef CONFIG_3_LEVEL_PGTABLES
  915. -#include <asm/pgtable-3level.h>
  916. -#else
  917. -#include <asm/pgtable-2level.h>
  918. -#endif
  919. -
  920. -extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  921. -
  922. -/* zero page used for uninitialized stuff */
  923. -extern unsigned long *empty_zero_page;
  924. -
  925. -#define pgtable_cache_init() do ; while (0)
  926. -
  927. -/* Just any arbitrary offset to the start of the vmalloc VM area: the
  928. - * current 8MB value just means that there will be a 8MB "hole" after the
  929. - * physical memory until the kernel virtual memory starts. That means that
  930. - * any out-of-bounds memory accesses will hopefully be caught.
  931. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  932. - * area for the same reason. ;)
  933. - */
  934. -
  935. -extern unsigned long end_iomem;
  936. -
  937. -#define VMALLOC_OFFSET (__va_space)
  938. -#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
  939. -#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
  940. -#ifdef CONFIG_HIGHMEM
  941. -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
  942. -#else
  943. -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
  944. -#endif
  945. -#define MODULES_VADDR VMALLOC_START
  946. -#define MODULES_END VMALLOC_END
  947. -#define MODULES_LEN (MODULES_VADDR - MODULES_END)
  948. -
  949. -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
  950. -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
  951. -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
  952. -#define __PAGE_KERNEL_EXEC \
  953. - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  954. -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
  955. -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
  956. -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  957. -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  958. -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  959. -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
  960. -
  961. -/*
  962. - * The i386 can't do page protection for execute, and considers that the same
  963. - * are read.
  964. - * Also, write permissions imply read permissions. This is the closest we can
  965. - * get..
  966. - */
  967. -#define __P000 PAGE_NONE
  968. -#define __P001 PAGE_READONLY
  969. -#define __P010 PAGE_COPY
  970. -#define __P011 PAGE_COPY
  971. -#define __P100 PAGE_READONLY
  972. -#define __P101 PAGE_READONLY
  973. -#define __P110 PAGE_COPY
  974. -#define __P111 PAGE_COPY
  975. -
  976. -#define __S000 PAGE_NONE
  977. -#define __S001 PAGE_READONLY
  978. -#define __S010 PAGE_SHARED
  979. -#define __S011 PAGE_SHARED
  980. -#define __S100 PAGE_READONLY
  981. -#define __S101 PAGE_READONLY
  982. -#define __S110 PAGE_SHARED
  983. -#define __S111 PAGE_SHARED
  984. -
  985. -/*
  986. - * ZERO_PAGE is a global shared page that is always zero: used
  987. - * for zero-mapped memory areas etc..
  988. - */
  989. -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
  990. -
  991. -#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
  992. -
  993. -#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
  994. -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  995. -
  996. -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
  997. -#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
  998. -
  999. -#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
  1000. -#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
  1001. -
  1002. -#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
  1003. -#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
  1004. -
  1005. -#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
  1006. -
  1007. -#define pte_page(x) pfn_to_page(pte_pfn(x))
  1008. -
  1009. -#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
  1010. -
  1011. -/*
  1012. - * =================================
  1013. - * Flags checking section.
  1014. - * =================================
  1015. - */
  1016. -
  1017. -static inline int pte_none(pte_t pte)
  1018. -{
  1019. - return pte_is_zero(pte);
  1020. -}
  1021. -
  1022. -/*
  1023. - * The following only work if pte_present() is true.
  1024. - * Undefined behaviour if not..
  1025. - */
  1026. -static inline int pte_read(pte_t pte)
  1027. -{
  1028. - return((pte_get_bits(pte, _PAGE_USER)) &&
  1029. - !(pte_get_bits(pte, _PAGE_PROTNONE)));
  1030. -}
  1031. -
  1032. -static inline int pte_exec(pte_t pte){
  1033. - return((pte_get_bits(pte, _PAGE_USER)) &&
  1034. - !(pte_get_bits(pte, _PAGE_PROTNONE)));
  1035. -}
  1036. -
  1037. -static inline int pte_write(pte_t pte)
  1038. -{
  1039. - return((pte_get_bits(pte, _PAGE_RW)) &&
  1040. - !(pte_get_bits(pte, _PAGE_PROTNONE)));
  1041. -}
  1042. -
  1043. -/*
  1044. - * The following only works if pte_present() is not true.
  1045. - */
  1046. -static inline int pte_file(pte_t pte)
  1047. -{
  1048. - return pte_get_bits(pte, _PAGE_FILE);
  1049. -}
  1050. -
  1051. -static inline int pte_dirty(pte_t pte)
  1052. -{
  1053. - return pte_get_bits(pte, _PAGE_DIRTY);
  1054. -}
  1055. -
  1056. -static inline int pte_young(pte_t pte)
  1057. -{
  1058. - return pte_get_bits(pte, _PAGE_ACCESSED);
  1059. -}
  1060. -
  1061. -static inline int pte_newpage(pte_t pte)
  1062. -{
  1063. - return pte_get_bits(pte, _PAGE_NEWPAGE);
  1064. -}
  1065. -
  1066. -static inline int pte_newprot(pte_t pte)
  1067. -{
  1068. - return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
  1069. -}
  1070. -
  1071. -static inline int pte_special(pte_t pte)
  1072. -{
  1073. - return 0;
  1074. -}
  1075. -
  1076. -/*
  1077. - * =================================
  1078. - * Flags setting section.
  1079. - * =================================
  1080. - */
  1081. -
  1082. -static inline pte_t pte_mknewprot(pte_t pte)
  1083. -{
  1084. - pte_set_bits(pte, _PAGE_NEWPROT);
  1085. - return(pte);
  1086. -}
  1087. -
  1088. -static inline pte_t pte_mkclean(pte_t pte)
  1089. -{
  1090. - pte_clear_bits(pte, _PAGE_DIRTY);
  1091. - return(pte);
  1092. -}
  1093. -
  1094. -static inline pte_t pte_mkold(pte_t pte)
  1095. -{
  1096. - pte_clear_bits(pte, _PAGE_ACCESSED);
  1097. - return(pte);
  1098. -}
  1099. -
  1100. -static inline pte_t pte_wrprotect(pte_t pte)
  1101. -{
  1102. - pte_clear_bits(pte, _PAGE_RW);
  1103. - return(pte_mknewprot(pte));
  1104. -}
  1105. -
  1106. -static inline pte_t pte_mkread(pte_t pte)
  1107. -{
  1108. - pte_set_bits(pte, _PAGE_USER);
  1109. - return(pte_mknewprot(pte));
  1110. -}
  1111. -
  1112. -static inline pte_t pte_mkdirty(pte_t pte)
  1113. -{
  1114. - pte_set_bits(pte, _PAGE_DIRTY);
  1115. - return(pte);
  1116. -}
  1117. -
  1118. -static inline pte_t pte_mkyoung(pte_t pte)
  1119. -{
  1120. - pte_set_bits(pte, _PAGE_ACCESSED);
  1121. - return(pte);
  1122. -}
  1123. -
  1124. -static inline pte_t pte_mkwrite(pte_t pte)
  1125. -{
  1126. - pte_set_bits(pte, _PAGE_RW);
  1127. - return(pte_mknewprot(pte));
  1128. -}
  1129. -
  1130. -static inline pte_t pte_mkuptodate(pte_t pte)
  1131. -{
  1132. - pte_clear_bits(pte, _PAGE_NEWPAGE);
  1133. - if(pte_present(pte))
  1134. - pte_clear_bits(pte, _PAGE_NEWPROT);
  1135. - return(pte);
  1136. -}
  1137. -
  1138. -static inline pte_t pte_mknewpage(pte_t pte)
  1139. -{
  1140. - pte_set_bits(pte, _PAGE_NEWPAGE);
  1141. - return(pte);
  1142. -}
  1143. -
  1144. -static inline pte_t pte_mkspecial(pte_t pte)
  1145. -{
  1146. - return(pte);
  1147. -}
  1148. -
  1149. -static inline void set_pte(pte_t *pteptr, pte_t pteval)
  1150. -{
  1151. - pte_copy(*pteptr, pteval);
  1152. -
  1153. - /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
  1154. - * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
  1155. - * mapped pages.
  1156. - */
  1157. -
  1158. - *pteptr = pte_mknewpage(*pteptr);
  1159. - if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
  1160. -}
  1161. -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
  1162. -
  1163. -#define __HAVE_ARCH_PTE_SAME
  1164. -static inline int pte_same(pte_t pte_a, pte_t pte_b)
  1165. -{
  1166. - return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
  1167. -}
  1168. -
  1169. -/*
  1170. - * Conversion functions: convert a page and protection to a page entry,
  1171. - * and a page entry and page directory to the page they refer to.
  1172. - */
  1173. -
  1174. -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
  1175. -#define __virt_to_page(virt) phys_to_page(__pa(virt))
  1176. -#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
  1177. -#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
  1178. -
  1179. -#define mk_pte(page, pgprot) \
  1180. - ({ pte_t pte; \
  1181. - \
  1182. - pte_set_val(pte, page_to_phys(page), (pgprot)); \
  1183. - if (pte_present(pte)) \
  1184. - pte_mknewprot(pte_mknewpage(pte)); \
  1185. - pte;})
  1186. -
  1187. -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  1188. -{
  1189. - pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
  1190. - return pte;
  1191. -}
  1192. -
  1193. -/*
  1194. - * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  1195. - *
  1196. - * this macro returns the index of the entry in the pgd page which would
  1197. - * control the given virtual address
  1198. - */
  1199. -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
  1200. -
  1201. -/*
  1202. - * pgd_offset() returns a (pgd_t *)
  1203. - * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  1204. - */
  1205. -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
  1206. -
  1207. -/*
  1208. - * a shortcut which implies the use of the kernel's pgd, instead
  1209. - * of a process's
  1210. - */
  1211. -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
  1212. -
  1213. -/*
  1214. - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
  1215. - *
  1216. - * this macro returns the index of the entry in the pmd page which would
  1217. - * control the given virtual address
  1218. - */
  1219. -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  1220. -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
  1221. -
  1222. -#define pmd_page_vaddr(pmd) \
  1223. - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  1224. -
  1225. -/*
  1226. - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  1227. - *
  1228. - * this macro returns the index of the entry in the pte page which would
  1229. - * control the given virtual address
  1230. - */
  1231. -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  1232. -#define pte_offset_kernel(dir, address) \
  1233. - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
  1234. -#define pte_offset_map(dir, address) \
  1235. - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
  1236. -#define pte_unmap(pte) do { } while (0)
  1237. -
  1238. -struct mm_struct;
  1239. -extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
  1240. -
  1241. -#define update_mmu_cache(vma,address,ptep) do ; while (0)
  1242. -
  1243. -/* Encode and de-code a swap entry */
  1244. -#define __swp_type(x) (((x).val >> 5) & 0x1f)
  1245. -#define __swp_offset(x) ((x).val >> 11)
  1246. -
  1247. -#define __swp_entry(type, offset) \
  1248. - ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
  1249. -#define __pte_to_swp_entry(pte) \
  1250. - ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
  1251. -#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
  1252. -
  1253. -#define kern_addr_valid(addr) (1)
  1254. -
  1255. -#include <asm-generic/pgtable.h>
  1256. -
  1257. -/* Clear a kernel PTE and flush it from the TLB */
  1258. -#define kpte_clear_flush(ptep, vaddr) \
  1259. -do { \
  1260. - pte_clear(&init_mm, (vaddr), (ptep)); \
  1261. - __flush_tlb_one((vaddr)); \
  1262. -} while (0)
  1263. -
  1264. -#endif
--- a/arch/um/include/asm/processor-generic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PROCESSOR_GENERIC_H
-#define __UM_PROCESSOR_GENERIC_H
-
-struct pt_regs;
-
-struct task_struct;
-
-#include <asm/ptrace.h>
-#include <registers.h>
-#include <sysdep/archsetjmp.h>
-
-#include <linux/prefetch.h>
-
-struct mm_struct;
-
-struct thread_struct {
- struct pt_regs regs;
- struct pt_regs *segv_regs;
- int singlestep_syscall;
- void *fault_addr;
- jmp_buf *fault_catcher;
- struct task_struct *prev_sched;
- struct arch_thread arch;
- jmp_buf switch_buf;
- struct {
- int op;
- union {
- struct {
- int pid;
- } fork, exec;
- struct {
- int (*proc)(void *);
- void *arg;
- } thread;
- struct {
- void (*proc)(void *);
- void *arg;
- } cb;
- } u;
- } request;
-};
-
-#define INIT_THREAD \
-{ \
- .regs = EMPTY_REGS, \
- .fault_addr = NULL, \
- .prev_sched = NULL, \
- .arch = INIT_ARCH_THREAD, \
- .request = { 0 } \
-}
-
-static inline void release_thread(struct task_struct *task)
-{
-}
-
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
-static inline void mm_copy_segments(struct mm_struct *from_mm,
- struct mm_struct *new_mm)
-{
-}
-
-#define init_stack (init_thread_union.stack)
-
-/*
- * User space process size: 3GB (default).
- */
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
-
-#undef STACK_TOP
-#undef STACK_TOP_MAX
-
-extern unsigned long stacksizelim;
-
-#define STACK_ROOM (stacksizelim)
-#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
-#define STACK_TOP_MAX STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE (0x40000000)
-
-extern void start_thread(struct pt_regs *regs, unsigned long entry,
- unsigned long stack);
-
-struct cpuinfo_um {
- unsigned long loops_per_jiffy;
- int ipi_pipe[2];
-};
-
-extern struct cpuinfo_um boot_cpu_data;
-
-#define my_cpu_data cpu_data[smp_processor_id()]
-
-#ifdef CONFIG_SMP
-extern struct cpuinfo_um cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
-
-
-#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-extern unsigned long get_wchan(struct task_struct *p);
-
-#endif
--- a/arch/um/include/asm/ptrace-generic.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_PTRACE_GENERIC_H
-#define __UM_PTRACE_GENERIC_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/ptrace-abi.h>
-#include <sysdep/ptrace.h>
-
-struct pt_regs {
- struct uml_pt_regs regs;
-};
-
-#define arch_has_single_step() (1)
-
-#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
-
-#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
-#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
-
-#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
-
-#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
-
-#define instruction_pointer(regs) PT_REGS_IP(regs)
-
-struct task_struct;
-
-extern long subarch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data);
-extern unsigned long getreg(struct task_struct *child, int regno);
-extern int putreg(struct task_struct *child, int regno, unsigned long value);
-
-extern int arch_copy_tls(struct task_struct *new);
-extern void clear_flushed_tls(struct task_struct *task);
-extern void syscall_trace_enter(struct pt_regs *regs);
-extern void syscall_trace_leave(struct pt_regs *regs);
-
-#endif
-
-#endif
--- a/arch/um/include/asm/setup.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef SETUP_H_INCLUDED
-#define SETUP_H_INCLUDED
-
-/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
- * command line, so this choice is ok.
- */
-
-#define COMMAND_LINE_SIZE 4096
-
-#endif /* SETUP_H_INCLUDED */
--- a/arch/um/include/asm/smp.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __UM_SMP_H
-#define __UM_SMP_H
-
-#ifdef CONFIG_SMP
-
-#include <linux/bitops.h>
-#include <asm/current.h>
-#include <linux/cpumask.h>
-
-#define raw_smp_processor_id() (current_thread->cpu)
-
-#define cpu_logical_map(n) (n)
-#define cpu_number_map(n) (n)
-extern int hard_smp_processor_id(void);
-#define NO_PROC_ID -1
-
-extern int ncpus;
-
-
-static inline void smp_cpus_done(unsigned int maxcpus)
-{
-}
-
-extern struct task_struct *idle_threads[NR_CPUS];
-
-#else
-
-#define hard_smp_processor_id() 0
-
-#endif
-
-#endif
--- a/arch/um/include/asm/stacktrace.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _ASM_UML_STACKTRACE_H
-#define _ASM_UML_STACKTRACE_H
-
-#include <linux/uaccess.h>
-#include <linux/ptrace.h>
-
-struct stack_frame {
- struct stack_frame *next_frame;
- unsigned long return_address;
-};
-
-struct stacktrace_ops {
- void (*address)(void *data, unsigned long address, int reliable);
-};
-
-#ifdef CONFIG_FRAME_POINTER
-static inline unsigned long
-get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
-{
- if (!task || task == current)
- return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
- return KSTK_EBP(task);
-}
-#else
-static inline unsigned long
-get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
-{
- return 0;
-}
-#endif
-
-static inline unsigned long
-*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
-{
- if (!task || task == current)
- return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
- return (unsigned long *)KSTK_ESP(task);
-}
-
-void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
-
-#endif /* _ASM_UML_STACKTRACE_H */
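
The stacktrace header removed above implies a simple frame-pointer walk: each struct stack_frame holds a saved next-frame pointer and a return address. A standalone user-space sketch of that walk, not part of the patch, with frames built by hand for illustration:

#include <stdio.h>
#include <stddef.h>

struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void walk(const struct stack_frame *frame)
{
	while (frame) {
		printf("ra=%#lx\n", frame->return_address);
		frame = frame->next_frame;
	}
}

int main(void)
{
	struct stack_frame leaf = { NULL, 0x1000 };
	struct stack_frame mid  = { &leaf, 0x2000 };
	struct stack_frame top  = { &mid,  0x3000 };

	walk(&top);	/* prints 0x3000, 0x2000, 0x1000 */
	return 0;
}
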
--- a/arch/um/include/asm/sysrq.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __UM_SYSRQ_H
-#define __UM_SYSRQ_H
-
-struct task_struct;
-extern void show_trace(struct task_struct* task, unsigned long *stack);
-
-#endif
--- a/arch/um/include/asm/thread_info.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_THREAD_INFO_H
-#define __UM_THREAD_INFO_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-#include <asm/page.h>
-#include <asm/uaccess.h>
-
-struct thread_info {
- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
- unsigned long flags; /* low level flags */
- __u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
- mm_segment_t addr_limit; /* thread address space:
- 0-0xBFFFFFFF for user
- 0-0xFFFFFFFF for kernel */
- struct restart_block restart_block;
- struct thread_info *real_thread; /* Points to non-IRQ stack */
-};
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .real_thread = NULL, \
-}
-
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- unsigned long mask = THREAD_SIZE - 1;
- void *p;
-
- asm volatile ("" : "=r" (p) : "0" (&ti));
- ti = (struct thread_info *) (((unsigned long)p) & ~mask);
- return ti;
-}
-
-#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
-
-#endif
-
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_RESTART_BLOCK 4
-#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
-#define TIF_SYSCALL_AUDIT 6
-#define TIF_RESTORE_SIGMASK 7
-#define TIF_NOTIFY_RESUME 8
-
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_MEMDIE (1 << TIF_MEMDIE)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-
-#endif
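
The current_thread_info() removed above relies on one invariant: the thread_info sits at the base of a THREAD_SIZE-aligned kernel stack, so masking off the low bits of any on-stack address finds it. A standalone sketch of the arithmetic, not part of the patch; the 2-page THREAD_SIZE is an assumption for the demo:

#include <stdio.h>

#define THREAD_SIZE (2 * 4096UL)	/* assumed stack size for the demo */

int main(void)
{
	unsigned long stack_base = 0x100000UL;	/* THREAD_SIZE-aligned base */
	unsigned long sp = stack_base + 0x1a34;	/* address of a local variable */

	/* clearing the low bits recovers the base, where thread_info lives */
	printf("thread_info at %#lx\n", sp & ~(THREAD_SIZE - 1));
	return 0;
}
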
--- a/arch/um/include/asm/timex.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __UM_TIMEX_H
-#define __UM_TIMEX_H
-
-typedef unsigned long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- return 0;
-}
-
-#define CLOCK_TICK_RATE (HZ)
-
-#endif
--- a/arch/um/include/asm/tlb.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef __UM_TLB_H
-#define __UM_TLB_H
-
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int need_flush; /* Really unmapped some ptes? */
- unsigned long start;
- unsigned long end;
- unsigned int fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->need_flush = 0;
-
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
- if (!tlb->need_flush)
- return;
-
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- * while handling the additional races in SMP caused by other CPUs
- * caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
- free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address) \
- do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
- } while (0)
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
-
-#endif
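
The core of the mmu_gather removed above is range tracking: each unmapped pte grows [start, end), and the final flush covers exactly the touched span. A standalone user-space sketch of that logic, not part of the patch; the constants are stand-ins:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE 0xc0000000UL	/* stand-in for the real task_size */

struct gather { unsigned long start, end; };

static void gather_init(struct gather *g)
{
	g->start = TASK_SIZE;	/* empty range: start > end */
	g->end = 0;
}

static void gather_page(struct gather *g, unsigned long addr)
{
	if (g->start > addr)
		g->start = addr;
	if (g->end < addr + PAGE_SIZE)
		g->end = addr + PAGE_SIZE;
}

int main(void)
{
	struct gather g;

	gather_init(&g);
	gather_page(&g, 0x400000);
	gather_page(&g, 0x403000);
	printf("flush [%#lx, %#lx)\n", g.start, g.end);	/* 0x400000..0x404000 */
	return 0;
}
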
--- a/arch/um/include/asm/tlbflush.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_TLBFLUSH_H
-#define __UM_TLBFLUSH_H
-
-#include <linux/mm.h>
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_kernel_vm() flushes the kernel vm area
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
-extern void flush_tlb_kernel_vm(void);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void __flush_tlb_one(unsigned long addr);
-
-#endif
--- a/arch/um/include/asm/uaccess.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __UM_UACCESS_H
-#define __UM_UACCESS_H
-
-/* thread_info has a mm_segment_t in it, so put the definition up here */
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#include <linux/thread_info.h>
-#include <linux/errno.h>
-#include <asm/processor.h>
-#include <asm/elf.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
-#define __under_task_size(addr, size) \
- (((unsigned long) (addr) < TASK_SIZE) && \
- (((unsigned long) (addr) + (size)) < TASK_SIZE))
-
-#define __access_ok_vsyscall(type, addr, size) \
- ((type == VERIFY_READ) && \
- ((unsigned long) (addr) >= FIXADDR_USER_START) && \
- ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
- ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
-
-#define __addr_range_nowrap(addr, size) \
- ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
-
-#define access_ok(type, addr, size) \
- (__addr_range_nowrap(addr, size) && \
- (__under_task_size(addr, size) || \
- __access_ok_vsyscall(type, addr, size) || \
- segment_eq(get_fs(), KERNEL_DS)))
-
-extern int copy_from_user(void *to, const void __user *from, int n);
-extern int copy_to_user(void __user *to, const void *from, int n);
-
-/*
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-
-extern int strncpy_from_user(char *dst, const char __user *src, int count);
-
-/*
- * __clear_user: - Zero a block of memory in user space, with less checking.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern int __clear_user(void __user *mem, int len);
-
-/*
- * clear_user: - Zero a block of memory in user space.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-extern int clear_user(void __user *mem, int len);
-
-/*
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-extern int strnlen_user(const void __user *str, int len);
-
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-#define __get_user(x, ptr) \
-({ \
- const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
- __typeof__(x) __private_val; \
- int __private_ret = -EFAULT; \
- (x) = (__typeof__(*(__private_ptr)))0; \
- if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
- sizeof(*(__private_ptr))) == 0) { \
- (x) = (__typeof__(*(__private_ptr))) __private_val; \
- __private_ret = 0; \
- } \
- __private_ret; \
-})
-
-#define get_user(x, ptr) \
-({ \
- const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
- (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
- __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
-})
-
-#define __put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __user *__private_ptr = ptr; \
- __typeof__(*(__private_ptr)) __private_val; \
- int __private_ret = -EFAULT; \
- __private_val = (__typeof__(*(__private_ptr))) (x); \
- if (__copy_to_user((__private_ptr), &__private_val, \
- sizeof(*(__private_ptr))) == 0) { \
- __private_ret = 0; \
- } \
- __private_ret; \
-})
-
-#define put_user(x, ptr) \
-({ \
- __typeof__(*(ptr)) __user *private_ptr = (ptr); \
- (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
- __put_user(x, private_ptr) : -EFAULT); \
-})
-
-#define strlen_user(str) strnlen_user(str, ~0U >> 1)
-
-struct exception_table_entry
-{
- unsigned long insn;
- unsigned long fixup;
-};
-
-#endif
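
The uaccess macros removed above all follow one calling convention: the value travels out through the first argument and the return is 0 on success or -EFAULT on failure. A standalone user-space model of that contract, not part of the patch; the copy is a stand-in for the real access_ok()/copy_from_user() pair:

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int model_get_user(int *x, const int *uptr)
{
	if (!uptr)
		return -EFAULT;		/* stand-in for a failed access_ok() */
	memcpy(x, uptr, sizeof(*x));	/* stand-in for copy_from_user() */
	return 0;
}

int main(void)
{
	int src = 42, dst = 0;

	if (model_get_user(&dst, &src) == 0)
		printf("read %d\n", dst);
	if (model_get_user(&dst, NULL) == -EFAULT)
		printf("fault path taken\n");
	return 0;
}
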
--- /dev/null
+++ b/arch/um/include/uapi/asm/Kbuild
@@ -0,0 +1,30 @@
+generic-y += barrier.h
+generic-y += bug.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += io.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += mcs_spinlock.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += switch_to.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += xor.h
--- /dev/null
+++ b/arch/um/include/uapi/asm/a.out-core.h
@@ -0,0 +1,27 @@
+/* a.out coredump register dumper
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef __UM_A_OUT_CORE_H
+#define __UM_A_OUT_CORE_H
+
+#ifdef __KERNEL__
+
+#include <linux/user.h>
+
+/*
+ * fill in the user structure for an a.out core dump
+ */
+static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
+{
+}
+
+#endif /* __KERNEL__ */
+#endif /* __UM_A_OUT_CORE_H */
--- /dev/null
+++ b/arch/um/include/uapi/asm/bugs.h
@@ -0,0 +1,6 @@
+#ifndef __UM_BUGS_H
+#define __UM_BUGS_H
+
+void check_bugs(void);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/cache.h
@@ -0,0 +1,17 @@
+#ifndef __UM_CACHE_H
+#define __UM_CACHE_H
+
+
+#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+#elif defined(CONFIG_UML_X86) /* 64-bit */
+# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
+#else
+/* XXX: this was taken from x86, now it's completely random. Luckily only
+ * affects SMP padding. */
+# define L1_CACHE_SHIFT 5
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/common.lds.S
@@ -0,0 +1,107 @@
+#include <asm-generic/vmlinux.lds.h>
+
+ .fini : { *(.fini) } =0x9090
+ _etext = .;
+ PROVIDE (etext = .);
+
+ . = ALIGN(4096);
+ _sdata = .;
+ PROVIDE (sdata = .);
+
+ RODATA
+
+ .unprotected : { *(.unprotected) }
+ . = ALIGN(4096);
+ PROVIDE (_unprotected_end = .);
+
+ . = ALIGN(4096);
+ .note : { *(.note.*) }
+ EXCEPTION_TABLE(0)
+
+ BUG_TABLE
+
+ .uml.setup.init : {
+ __uml_setup_start = .;
+ *(.uml.setup.init)
+ __uml_setup_end = .;
+ }
+
+ .uml.help.init : {
+ __uml_help_start = .;
+ *(.uml.help.init)
+ __uml_help_end = .;
+ }
+
+ .uml.postsetup.init : {
+ __uml_postsetup_start = .;
+ *(.uml.postsetup.init)
+ __uml_postsetup_end = .;
+ }
+
+ .init.setup : {
+ INIT_SETUP(0)
+ }
+
+ PERCPU_SECTION(32)
+
+ .initcall.init : {
+ INIT_CALLS
+ }
+
+ .con_initcall.init : {
+ CON_INITCALL
+ }
+
+ .uml.initcall.init : {
+ __uml_initcall_start = .;
+ *(.uml.initcall.init)
+ __uml_initcall_end = .;
+ }
+
+ SECURITY_INIT
+
+ .exitcall : {
+ __exitcall_begin = .;
+ *(.exitcall.exit)
+ __exitcall_end = .;
+ }
+
+ .uml.exitcall : {
+ __uml_exitcall_begin = .;
+ *(.uml.exitcall.exit)
+ __uml_exitcall_end = .;
+ }
+
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+ .altinstr_replacement : { *(.altinstr_replacement) }
+ /* .exit.text is discarded at runtime, not link time, to deal with references
+ from .altinstructions and .eh_frame */
+ .exit.text : { *(.exit.text) }
+ .exit.data : { *(.exit.data) }
+
+ .preinit_array : {
+ __preinit_array_start = .;
+ *(.preinit_array)
+ __preinit_array_end = .;
+ }
+ .init_array : {
+ __init_array_start = .;
+ *(.init_array)
+ __init_array_end = .;
+ }
+ .fini_array : {
+ __fini_array_start = .;
+ *(.fini_array)
+ __fini_array_end = .;
+ }
+
+ . = ALIGN(4096);
+ .init.ramfs : {
+ INIT_RAM_FS
+ }
+
--- /dev/null
+++ b/arch/um/include/uapi/asm/dma.h
@@ -0,0 +1,10 @@
+#ifndef __UM_DMA_H
+#define __UM_DMA_H
+
+#include <asm/io.h>
+
+extern unsigned long uml_physmem;
+
+#define MAX_DMA_ADDRESS (uml_physmem)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/fixmap.h
@@ -0,0 +1,60 @@
+#ifndef __UM_FIXMAP_H
+#define __UM_FIXMAP_H
+
+#include <asm/processor.h>
+#include <asm/kmap_types.h>
+#include <asm/archparam.h>
+#include <asm/page.h>
+#include <linux/threads.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * on UP currently we will have no trace of the fixmap mechanism,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+
+#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+#endif
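
The FIXADDR_* arithmetic added above carves the fixmap out of the top of the address space, one page per enum slot, growing downwards. A standalone sketch of the math, not part of the patch; the 3 GB TASK_SIZE and the two dummy slots are assumptions for the demo:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define TASK_SIZE  0xc0000000UL	/* assumed 3 GB user space */

enum fixed_addresses { FIX_A, FIX_B, __end_of_fixed_addresses };

#define FIXADDR_TOP   (TASK_SIZE - 2 * PAGE_SIZE)
#define FIXADDR_SIZE  (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)

int main(void)
{
	/* two slots of 4 KiB each, just under 0xbfffe000 */
	printf("fixmap spans [%#lx, %#lx)\n", FIXADDR_START, FIXADDR_TOP);
	return 0;
}
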
--- /dev/null
+++ b/arch/um/include/uapi/asm/irq.h
@@ -0,0 +1,23 @@
+#ifndef __UM_IRQ_H
+#define __UM_IRQ_H
+
+#define TIMER_IRQ 0
+#define UMN_IRQ 1
+#define CONSOLE_IRQ 2
+#define CONSOLE_WRITE_IRQ 3
+#define UBD_IRQ 4
+#define UM_ETH_IRQ 5
+#define SSL_IRQ 6
+#define SSL_WRITE_IRQ 7
+#define ACCEPT_IRQ 8
+#define MCONSOLE_IRQ 9
+#define WINCH_IRQ 10
+#define SIGIO_WRITE_IRQ 11
+#define TELNETD_IRQ 12
+#define XTERM_IRQ 13
+#define RANDOM_IRQ 14
+
+#define LAST_IRQ RANDOM_IRQ
+#define NR_IRQS (LAST_IRQ + 1)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/irqflags.h
@@ -0,0 +1,42 @@
+#ifndef __UM_IRQFLAGS_H
+#define __UM_IRQFLAGS_H
+
+extern int get_signals(void);
+extern int set_signals(int enable);
+extern void block_signals(void);
+extern void unblock_signals(void);
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return get_signals();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ set_signals(flags);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unblock_signals();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_local_save_flags() == 0;
+}
+
+#endif
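
UML has no real interrupt controller; the helpers above map the generic irqflags API onto blocking and unblocking host signals. A standalone sketch of the save/disable/restore discipline they implement, not part of the patch, with an int standing in for the host signal state:

#include <stdio.h>

static int signals_enabled = 1;

static unsigned long local_irq_save(void)
{
	unsigned long flags = signals_enabled;
	signals_enabled = 0;		/* block_signals() */
	return flags;
}

static void local_irq_restore(unsigned long flags)
{
	signals_enabled = flags;	/* set_signals(flags) */
}

int main(void)
{
	unsigned long flags = local_irq_save();
	/* critical section: "interrupts" (signals) are off here */
	printf("inside: enabled=%d\n", signals_enabled);
	local_irq_restore(flags);
	printf("after: enabled=%d\n", signals_enabled);
	return 0;
}
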
--- /dev/null
+++ b/arch/um/include/uapi/asm/kmap_types.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_KMAP_TYPES_H
+#define __UM_KMAP_TYPES_H
+
+/* No more #include "asm/arch/kmap_types.h" ! */
+
+#define KM_TYPE_NR 14
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
--- /dev/null
+++ b/arch/um/include/uapi/asm/mmu.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __ARCH_UM_MMU_H
+#define __ARCH_UM_MMU_H
+
+#include <mm_id.h>
+#include <asm/mm_context.h>
+
+typedef struct mm_context {
+ struct mm_id id;
+ struct uml_arch_mm_context arch;
+ struct page *stub_pages[2];
+} mm_context_t;
+
+extern void __switch_mm(struct mm_id * mm_idp);
+
+/* Avoid tangled inclusion with asm/ldt.h */
+extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+extern void free_ldt(struct mm_context *mm);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/mmu_context.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_MMU_CONTEXT_H
+#define __UM_MMU_CONTEXT_H
+
+#include <linux/sched.h>
+#include <asm/mmu.h>
+
+extern void uml_setup_stubs(struct mm_struct *mm);
+extern void arch_exit_mmap(struct mm_struct *mm);
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+extern void force_flush_all(void);
+
+static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
+{
+ /*
+ * This is called by fs/exec.c and sys_unshare()
+ * when the new ->mm is used for the first time.
+ */
+ __switch_mm(&new->context.id);
+ down_write(&new->mmap_sem);
+ uml_setup_stubs(new);
+ up_write(&new->mmap_sem);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+
+ if(prev != next){
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ if(next != &init_mm)
+ __switch_mm(&next->context.id);
+ }
+}
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ uml_setup_stubs(mm);
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
+
+extern void destroy_context(struct mm_struct *mm);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/page.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright 2003 PathScale, Inc.
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PAGE_H
+#define __UM_PAGE_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct page;
+
+#include <linux/types.h>
+#include <asm/vm-flags.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
+
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
+
+#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
+#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
+#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
+#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
+ smp_wmb(); \
+ (to).pte_low = (from).pte_low; })
+#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
+#define pte_set_val(pte, phys, prot) \
+ ({ (pte).pte_high = (phys) >> 32; \
+ (pte).pte_low = (phys) | pgprot_val(prot); })
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+typedef unsigned long long pfn_t;
+typedef unsigned long long phys_t;
+
+#else
+
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+#endif
+
+#define pte_val(x) ((x).pte)
+
+
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ((to).pte = (from).pte)
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
+
+typedef unsigned long pfn_t;
+typedef unsigned long phys_t;
+
+#endif
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long uml_physmem;
+
+#define PAGE_OFFSET (uml_physmem)
+#define KERNELBASE PAGE_OFFSET
+
+#define __va_space (8*1024*1024)
+
+#include <mem.h>
+
+/* Cast to unsigned long before casting to void * to avoid a warning from
+ * mmap_kmem about cutting a long long down to a void *. Not sure that
+ * casting is the right thing, but 32-bit UML can't have 64-bit virtual
+ * addresses
+ */
+#define __pa(virt) to_phys((void *) (unsigned long) (virt))
+#define __va(phys) to_virt((unsigned long) (phys))
+
+#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
+#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
+#endif /* __UM_PAGE_H */
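
The phys_to_pfn()/pfn_to_phys() helpers added above are pure shifts, so the round trip is lossless for page-aligned addresses. A standalone sketch, not part of the patch; the sample physical address is arbitrary:

#include <stdio.h>

#define PAGE_SHIFT 12

typedef unsigned long phys_t;
typedef unsigned long pfn_t;

#define phys_to_pfn(p)   ((pfn_t)((p) >> PAGE_SHIFT))
#define pfn_to_phys(pfn) ((phys_t)((pfn) << PAGE_SHIFT))

int main(void)
{
	phys_t phys = 0x12345000UL;
	pfn_t pfn = phys_to_pfn(phys);	/* 0x12345 */

	printf("phys=%#lx -> pfn=%#lx -> phys=%#lx\n",
	       phys, pfn, pfn_to_phys(pfn));
	return 0;
}
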
--- /dev/null
+++ b/arch/um/include/uapi/asm/pgalloc.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PGALLOC_H
+#define __UM_PGALLOC_H
+
+#include <linux/mm.h>
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + \
+ ((unsigned long long)page_to_pfn(pte) << \
+ (unsigned long long) PAGE_SHIFT)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long) pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+#define __pte_free_tlb(tlb,pte, address) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
+#endif
+
+#define check_pgt_cache() do { } while (0)
+
+#endif
+
--- /dev/null
+++ b/arch/um/include/uapi/asm/pgtable-2level.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgtable.h
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PGTABLE_2LEVEL_H
+#define __UM_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: the i386 is two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
+#define PTRS_PER_PGD 1024
+#define FIRST_USER_ADDRESS 0
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+ pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+ pgd_val(e))
+
+static inline int pgd_newpage(pgd_t pgd) { return 0; }
+static inline void pgd_mkuptodate(pgd_t pgd) { }
+
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+#define pte_pfn(x) phys_to_pfn(pte_val(x))
+#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
+
+/*
+ * Bits 0 through 4 are taken
+ */
+#define PTE_FILE_MAX_BITS 27
+
+#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
+
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
+
+#endif
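
The nonlinear-mapping encoding added above works because pte flag bits 0-4 are in use while the pte is not present: the file offset is parked in bits 5 and up, and recovered with the inverse shift. A standalone sketch of the round trip, not part of the patch (it uses | where the macro uses +, which is equivalent since the low bits are clear after the shift):

#include <stdio.h>

#define _PAGE_FILE 0x008

static unsigned long pgoff_to_pteval(unsigned long off)
{
	return (off << 5) | _PAGE_FILE;	/* offset above the five flag bits */
}

static unsigned long pteval_to_pgoff(unsigned long pteval)
{
	return pteval >> 5;
}

int main(void)
{
	unsigned long off = 0x1234;
	unsigned long pteval = pgoff_to_pteval(off);

	printf("pteval=%#lx pgoff=%#lx\n", pteval, pteval_to_pgoff(pteval));
	return 0;
}
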
--- /dev/null
+++ b/arch/um/include/uapi/asm/pgtable-3level.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2003 PathScale Inc
+ * Derived from include/asm-i386/pgtable.h
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PGTABLE_3LEVEL_H
+#define __UM_PGTABLE_3LEVEL_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
+#ifdef CONFIG_64BIT
+#define PGDIR_SHIFT 30
+#else
+#define PGDIR_SHIFT 31
+#endif
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can
+ * map
+ */
+
+#define PMD_SHIFT 21
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * entries per page directory level
+ */
+
+#define PTRS_PER_PTE 512
+#ifdef CONFIG_64BIT
+#define PTRS_PER_PMD 512
+#define PTRS_PER_PGD 512
+#else
+#define PTRS_PER_PMD 1024
+#define PTRS_PER_PGD 1024
+#endif
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pgd_val(e))
+
+#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
+#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+#define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+
+#ifdef CONFIG_64BIT
+#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+#else
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
+static inline int pgd_newpage(pgd_t pgd)
+{
+ return(pgd_val(pgd) & _PAGE_NEWPAGE);
+}
+
+static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
+
+#ifdef CONFIG_64BIT
+#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
+struct mm_struct;
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+
+static inline void pud_clear (pud_t *pud)
+{
+ set_pud(pud, __pud(_PAGE_NEWPAGE));
+}
+
+#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
+ pmd_index(address))
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return phys_to_pfn(pte_val(pte));
+}
+
+static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+ phys_t phys = pfn_to_phys(page_nr);
+
+ pte_set_val(pte, phys, pgprot);
+ return pte;
+}
+
+static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
+{
+ return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+/*
+ * Bits 0 through 3 are taken in the low part of the pte,
+ * put the 32 bits of offset into the high part.
+ */
+#define PTE_FILE_MAX_BITS 32
+
+#ifdef CONFIG_64BIT
+
+#define pte_to_pgoff(p) ((p).pte >> 32)
+
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
+
+#else
+
+#define pte_to_pgoff(pte) ((pte).pte_high)
+
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+
+#endif
+
+#endif
+
  2829. --- /dev/null
  2830. +++ b/arch/um/include/uapi/asm/pgtable.h
  2831. @@ -0,0 +1,375 @@
  2832. +/*
  2833. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  2834. + * Copyright 2003 PathScale, Inc.
  2835. + * Derived from include/asm-i386/pgtable.h
  2836. + * Licensed under the GPL
  2837. + */
  2838. +
  2839. +#ifndef __UM_PGTABLE_H
  2840. +#define __UM_PGTABLE_H
  2841. +
  2842. +#include <asm/fixmap.h>
  2843. +
  2844. +#define _PAGE_PRESENT 0x001
  2845. +#define _PAGE_NEWPAGE 0x002
  2846. +#define _PAGE_NEWPROT 0x004
  2847. +#define _PAGE_RW 0x020
  2848. +#define _PAGE_USER 0x040
  2849. +#define _PAGE_ACCESSED 0x080
  2850. +#define _PAGE_DIRTY 0x100
  2851. +/* If _PAGE_PRESENT is clear, we use these: */
  2852. +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
  2853. +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
  2854. + pte_present gives true */
  2855. +
  2856. +#ifdef CONFIG_3_LEVEL_PGTABLES
  2857. +#include <asm/pgtable-3level.h>
  2858. +#else
  2859. +#include <asm/pgtable-2level.h>
  2860. +#endif
  2861. +
  2862. +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  2863. +
  2864. +/* zero page used for uninitialized stuff */
  2865. +extern unsigned long *empty_zero_page;
  2866. +
  2867. +#define pgtable_cache_init() do ; while (0)
  2868. +
  2869. +/* Just any arbitrary offset to the start of the vmalloc VM area: the
  2870. + * current 8MB value just means that there will be a 8MB "hole" after the
  2871. + * physical memory until the kernel virtual memory starts. That means that
  2872. + * any out-of-bounds memory accesses will hopefully be caught.
  2873. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  2874. + * area for the same reason. ;)
  2875. + */
  2876. +
  2877. +extern unsigned long end_iomem;
  2878. +
  2879. +#define VMALLOC_OFFSET (__va_space)
  2880. +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
  2881. +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
  2882. +#ifdef CONFIG_HIGHMEM
  2883. +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
  2884. +#else
  2885. +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
  2886. +#endif
  2887. +#define MODULES_VADDR VMALLOC_START
  2888. +#define MODULES_END VMALLOC_END
  2889. +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
  2890. +
  2891. +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
  2892. +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
  2893. +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
  2894. +#define __PAGE_KERNEL_EXEC \
  2895. + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  2896. +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
  2897. +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
  2898. +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  2899. +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  2900. +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  2901. +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
  2902. +
  2903. +/*
  2904. + * The i386 can't do page protection for execute, and considers that the same
  2905. + * are read.
  2906. + * Also, write permissions imply read permissions. This is the closest we can
  2907. + * get..
  2908. + */
  2909. +#define __P000 PAGE_NONE
  2910. +#define __P001 PAGE_READONLY
  2911. +#define __P010 PAGE_COPY
  2912. +#define __P011 PAGE_COPY
  2913. +#define __P100 PAGE_READONLY
  2914. +#define __P101 PAGE_READONLY
  2915. +#define __P110 PAGE_COPY
  2916. +#define __P111 PAGE_COPY
  2917. +
  2918. +#define __S000 PAGE_NONE
  2919. +#define __S001 PAGE_READONLY
  2920. +#define __S010 PAGE_SHARED
  2921. +#define __S011 PAGE_SHARED
  2922. +#define __S100 PAGE_READONLY
  2923. +#define __S101 PAGE_READONLY
  2924. +#define __S110 PAGE_SHARED
  2925. +#define __S111 PAGE_SHARED
  2926. +
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+
+#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
+
+#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
+
+#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
+#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
+
+#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
+#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
+
+#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
+
+/*
+ * =================================
+ * Flags checking section.
+ * =================================
+ */
+
+static inline int pte_none(pte_t pte)
+{
+ return pte_is_zero(pte);
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte)
+{
+ return((pte_get_bits(pte, _PAGE_USER)) &&
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return((pte_get_bits(pte, _PAGE_USER)) &&
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return((pte_get_bits(pte, _PAGE_RW)) &&
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+}
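As the comment above warns, these predicates are only defined on present ptes, so a caller is expected to test pte_present() first. A hypothetical caller (handle_write_fault() is illustrative, not part of the patch):

	if (pte_present(pte) && pte_write(pte))
		handle_write_fault(vma, addr);	/* safe: pte is present */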
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_FILE);
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_DIRTY);
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_ACCESSED);
+}
+
+static inline int pte_newpage(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_NEWPAGE);
+}
+
+static inline int pte_newprot(pte_t pte)
+{
+ return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
+/*
+ * =================================
+ * Flags setting section.
+ * =================================
+ */
+
+static inline pte_t pte_mknewprot(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_NEWPROT);
+ return(pte);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_DIRTY);
+ return(pte);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_ACCESSED);
+ return(pte);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_RW);
+ return(pte_mknewprot(pte));
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_USER);
+ return(pte_mknewprot(pte));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_DIRTY);
+ return(pte);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_ACCESSED);
+ return(pte);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_RW);
+ return(pte_mknewprot(pte));
+}
+
+static inline pte_t pte_mkuptodate(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_NEWPAGE);
+ if (pte_present(pte))
+ pte_clear_bits(pte, _PAGE_NEWPROT);
+ return(pte);
+}
+
+static inline pte_t pte_mknewpage(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_NEWPAGE);
+ return(pte);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return(pte);
+}
+
+static inline void set_pte(pte_t *pteptr, pte_t pteval)
+{
+ pte_copy(*pteptr, pteval);
+
+ /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
+ * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
+ * mapped pages.
+ */
+
+ *pteptr = pte_mknewpage(*pteptr);
+ if (pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define __virt_to_page(virt) phys_to_page(__pa(virt))
+#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
+
+#define mk_pte(page, pgprot) \
+ ({ pte_t pte; \
+ \
+ pte_set_val(pte, page_to_phys(page), (pgprot)); \
+ if (pte_present(pte)) \
+ pte = pte_mknewprot(pte_mknewpage(pte)); \
+ pte;})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
+ return pte;
+}
+
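Taken together, mk_pte() builds an entry from a struct page plus a protection, while pte_modify() replaces the protection bits but preserves everything in _PAGE_CHG_MASK (the physical address plus accessed/dirty). A hedged sketch of typical use, with illustrative surrounding names:

	/* Illustrative only: install a page read-only, later widen it to
	 * the vma's protections without losing the pfn or dirty bits. */
	pte_t pte = mk_pte(page, PAGE_READONLY);
	set_pte_at(mm, addr, ptep, pte);
	/* ... */
	set_pte_at(mm, addr, ptep, pte_modify(pte, vma->vm_page_prot));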
+/*
+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) do { } while (0)
+
+struct mm_struct;
+extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+
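These index macros compose into the usual top-down walk that virt_to_pte() performs internally. A sketch, assuming the pud_offset()/pmd_offset() helpers that the folded upper page-table levels supply elsewhere in the tree:

	pgd_t *pgd = pgd_offset(mm, addr);	/* slot chosen by pgd_index() */
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	pte_t *pte = pte_offset_kernel(pmd, addr); /* slot chosen by pte_index() */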
+#define update_mmu_cache(vma,address,ptep) do ; while (0)
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x) (((x).val >> 5) & 0x1f)
+#define __swp_offset(x) ((x).val >> 11)
+
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
+#define __pte_to_swp_entry(pte) \
+ ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
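A swap entry lives in a non-present pte: the swap type sits in bits 5-9 and the page offset in bits 11 and up, keeping the low flag bits clear. For example, __swp_entry(2, 0x1234) packs to (2 << 5) | (0x1234 << 11) = 0x91a040, from which __swp_type() recovers 2 and __swp_offset() recovers 0x1234.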
+#define kern_addr_valid(addr) (1)
+
+#include <asm-generic/pgtable.h>
+
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
+ __flush_tlb_one((vaddr)); \
+} while (0)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/processor-generic.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PROCESSOR_GENERIC_H
+#define __UM_PROCESSOR_GENERIC_H
+
+struct pt_regs;
+
+struct task_struct;
+
+#include <asm/ptrace.h>
+#include <registers.h>
+#include <sysdep/archsetjmp.h>
+
+#include <linux/prefetch.h>
+
+struct mm_struct;
+
+struct thread_struct {
+ struct pt_regs regs;
+ struct pt_regs *segv_regs;
+ int singlestep_syscall;
+ void *fault_addr;
+ jmp_buf *fault_catcher;
+ struct task_struct *prev_sched;
+ struct arch_thread arch;
+ jmp_buf switch_buf;
+ struct {
+ int op;
+ union {
+ struct {
+ int pid;
+ } fork, exec;
+ struct {
+ int (*proc)(void *);
+ void *arg;
+ } thread;
+ struct {
+ void (*proc)(void *);
+ void *arg;
+ } cb;
+ } u;
+ } request;
+};
+
+#define INIT_THREAD \
+{ \
+ .regs = EMPTY_REGS, \
+ .fault_addr = NULL, \
+ .prev_sched = NULL, \
+ .arch = INIT_ARCH_THREAD, \
+ .request = { 0 } \
+}
+
+static inline void release_thread(struct task_struct *task)
+{
+}
+
+extern unsigned long thread_saved_pc(struct task_struct *t);
+
+static inline void mm_copy_segments(struct mm_struct *from_mm,
+ struct mm_struct *new_mm)
+{
+}
+
+#define init_stack (init_thread_union.stack)
+
+/*
+ * User space process size: 3GB (default).
+ */
+extern unsigned long task_size;
+
+#define TASK_SIZE (task_size)
+
+#undef STACK_TOP
+#undef STACK_TOP_MAX
+
+extern unsigned long stacksizelim;
+
+#define STACK_ROOM (stacksizelim)
+#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmaps.
+ */
+#define TASK_UNMAPPED_BASE (0x40000000)
+
+extern void start_thread(struct pt_regs *regs, unsigned long entry,
+ unsigned long stack);
+
+struct cpuinfo_um {
+ unsigned long loops_per_jiffy;
+ int ipi_pipe[2];
+};
+
+extern struct cpuinfo_um boot_cpu_data;
+
+#define my_cpu_data cpu_data[smp_processor_id()]
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_um cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+
+#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
+extern unsigned long get_wchan(struct task_struct *p);
+
+#endif
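For instance, with the default 3GB task size (TASK_SIZE = 0xc0000000) and 4K pages, the two reserved pages put STACK_TOP at 0xc0000000 - 2 * 0x1000 = 0xbfffe000.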
--- /dev/null
+++ b/arch/um/include/uapi/asm/ptrace-generic.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_PTRACE_GENERIC_H
+#define __UM_PTRACE_GENERIC_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace-abi.h>
+#include <sysdep/ptrace.h>
+
+struct pt_regs {
+ struct uml_pt_regs regs;
+};
+
+#define arch_has_single_step() (1)
+
+#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
+
+#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
+#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
+
+#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
+
+#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
+
+#define instruction_pointer(regs) PT_REGS_IP(regs)
+
+struct task_struct;
+
+extern long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+extern unsigned long getreg(struct task_struct *child, int regno);
+extern int putreg(struct task_struct *child, int regno, unsigned long value);
+
+extern int arch_copy_tls(struct task_struct *new);
+extern void clear_flushed_tls(struct task_struct *task);
+extern void syscall_trace_enter(struct pt_regs *regs);
+extern void syscall_trace_leave(struct pt_regs *regs);
+
+#endif
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/setup.h
@@ -0,0 +1,10 @@
+#ifndef SETUP_H_INCLUDED
+#define SETUP_H_INCLUDED
+
+/* POSIX mandates, via _POSIX_ARG_MAX, that we can rely on 4096 chars in the
+ * command line, so this choice is ok.
+ */
+
+#define COMMAND_LINE_SIZE 4096
+
+#endif /* SETUP_H_INCLUDED */
--- /dev/null
+++ b/arch/um/include/uapi/asm/smp.h
@@ -0,0 +1,32 @@
+#ifndef __UM_SMP_H
+#define __UM_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/bitops.h>
+#include <asm/current.h>
+#include <linux/cpumask.h>
+
+#define raw_smp_processor_id() (current_thread->cpu)
+
+#define cpu_logical_map(n) (n)
+#define cpu_number_map(n) (n)
+extern int hard_smp_processor_id(void);
+#define NO_PROC_ID -1
+
+extern int ncpus;
+
+
+static inline void smp_cpus_done(unsigned int maxcpus)
+{
+}
+
+extern struct task_struct *idle_threads[NR_CPUS];
+
+#else
+
+#define hard_smp_processor_id() 0
+
+#endif
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/stacktrace.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_UML_STACKTRACE_H
+#define _ASM_UML_STACKTRACE_H
+
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+struct stack_frame {
+ struct stack_frame *next_frame;
+ unsigned long return_address;
+};
+
+struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+};
+
+#ifdef CONFIG_FRAME_POINTER
+static inline unsigned long
+get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
+{
+ if (!task || task == current)
+ return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
+ return KSTK_EBP(task);
+}
+#else
+static inline unsigned long
+get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
+{
+ return 0;
+}
+#endif
+
+static inline unsigned long
+*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
+{
+ if (!task || task == current)
+ return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
+ return (unsigned long *)KSTK_ESP(task);
+}
+
+void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
+
+#endif /* _ASM_UML_STACKTRACE_H */
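A hypothetical consumer of this interface (not part of the patch) fills in the address callback and hands it to dump_trace(); printk-style output is assumed:

	static void print_address(void *data, unsigned long address, int reliable)
	{
		printk(KERN_INFO " [<%08lx>]%s\n", address,
		       reliable ? "" : " (unreliable)");
	}

	static const struct stacktrace_ops print_ops = {
		.address = print_address,
	};

	/* dump_trace(current, &print_ops, NULL); */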
--- /dev/null
+++ b/arch/um/include/uapi/asm/sysrq.h
@@ -0,0 +1,7 @@
+#ifndef __UM_SYSRQ_H
+#define __UM_SYSRQ_H
+
+struct task_struct;
+extern void show_trace(struct task_struct *task, unsigned long *stack);
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/thread_info.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_THREAD_INFO_H
+#define __UM_THREAD_INFO_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
+ mm_segment_t addr_limit; /* thread address space:
+ 0-0xBFFFFFFF for user
+ 0-0xFFFFFFFF for kernel */
+ struct restart_block restart_block;
+ struct thread_info *real_thread; /* Points to non-IRQ stack */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .real_thread = NULL, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ unsigned long mask = THREAD_SIZE - 1;
+ void *p;
+
+ asm volatile ("" : "=r" (p) : "0" (&ti));
+ ti = (struct thread_info *) (((unsigned long)p) & ~mask);
+ return ti;
+}
+
+#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
+
+#endif
+
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_RESTART_BLOCK 4
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_SYSCALL_AUDIT 6
+#define TIF_RESTORE_SIGMASK 7
+#define TIF_NOTIFY_RESUME 8
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+
+#endif
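current_thread_info() works because kernel stacks are THREAD_SIZE-aligned: masking the address of any stack variable with ~(THREAD_SIZE - 1) lands on the thread_info at the stack's base. For instance, assuming CONFIG_KERNEL_STACK_ORDER = 2 and 4K pages, THREAD_SIZE is 0x4000, so a stack address of 0x08063f40 masks down to 0x08060000.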
--- /dev/null
+++ b/arch/um/include/uapi/asm/timex.h
@@ -0,0 +1,13 @@
+#ifndef __UM_TIMEX_H
+#define __UM_TIMEX_H
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+
+#define CLOCK_TICK_RATE (HZ)
+
+#endif
--- /dev/null
+++ b/arch/um/include/uapi/asm/tlb.h
@@ -0,0 +1,134 @@
+#ifndef __UM_TLB_H
+#define __UM_TLB_H
+
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <asm/percpu.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int need_flush; /* Really unmapped some ptes? */
+ unsigned long start;
+ unsigned long end;
+ unsigned int fullmm; /* non-zero means full mm flush */
+};
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address)
+{
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + PAGE_SIZE)
+ tlb->end = address + PAGE_SIZE;
+}
+
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+ tlb->need_flush = 0;
+
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+
+ if (tlb->fullmm) {
+ tlb->start = 0;
+ tlb->end = TASK_SIZE;
+ }
+}
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ tlb->mm = mm;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
+
+ init_tlb_gather(tlb);
+}
+
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+
+static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ init_tlb_gather(tlb);
+}
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ if (!tlb->need_flush)
+ return;
+
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
+}
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ tlb_flush_mmu(tlb);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+}
+
+/* tlb_remove_page
+ * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
+ * while handling the additional races in SMP caused by other CPUs
+ * caching valid mappings in their TLBs.
+ */
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+ free_page_and_swap_cache(page);
+ return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ __tlb_remove_page(tlb, page);
+}
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that ptes were really unmapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate. This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
+
+#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
+
+#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
+
+#define tlb_migrate_finish(mm) do {} while (0)
+
+#endif
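The gather structure batches an entire unmap into one range flush. A sketch of the sequence the generic mm code drives through these hooks, with the pte-clearing loop body elided:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/* ...pte cleared here... */
		tlb_remove_tlb_entry(&tlb, ptep, addr);	/* widen flush range */
		tlb_remove_page(&tlb, page);	/* free page + swap cache */
	}
	tlb_finish_mmu(&tlb, start, end);	/* one flush_tlb_mm_range() */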
--- /dev/null
+++ b/arch/um/include/uapi/asm/tlbflush.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_TLBFLUSH_H
+#define __UM_TLBFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes' TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_kernel_vm() flushes the kernel vm area
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
+extern void flush_tlb_kernel_vm(void);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void __flush_tlb_one(unsigned long addr);
+
+#endif
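The usual rule is to call the narrowest primitive that covers the update; hypothetical call sites:

	flush_tlb_page(vma, addr);		/* changed a single user pte */
	flush_tlb_range(vma, start, end);	/* changed a run of user ptes */
	flush_tlb_kernel_range(start, end);	/* remapped kernel addresses */
	flush_tlb_mm(mm);			/* rewrote a whole address space */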
--- /dev/null
+++ b/arch/um/include/uapi/asm/uaccess.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_UACCESS_H
+#define __UM_UACCESS_H
+
+/* thread_info has a mm_segment_t in it, so put the definition up here */
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#include <linux/thread_info.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/elf.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define __under_task_size(addr, size) \
+ (((unsigned long) (addr) < TASK_SIZE) && \
+ (((unsigned long) (addr) + (size)) < TASK_SIZE))
+
+#define __access_ok_vsyscall(type, addr, size) \
+ ((type == VERIFY_READ) && \
+ ((unsigned long) (addr) >= FIXADDR_USER_START) && \
+ ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
+ ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
+
+#define __addr_range_nowrap(addr, size) \
+ ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
+
+#define access_ok(type, addr, size) \
+ (__addr_range_nowrap(addr, size) && \
+ (__under_task_size(addr, size) || \
+ __access_ok_vsyscall(type, addr, size) || \
+ segment_eq(get_fs(), KERNEL_DS)))
+
+extern int copy_from_user(void *to, const void __user *from, int n);
+extern int copy_to_user(void __user *to, const void *from, int n);
+
+/*
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst: Destination address, in kernel space. This buffer must be at
+ * least @count bytes long.
+ * @src: Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+
+extern int strncpy_from_user(char *dst, const char __user *src, int count);
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @mem: Destination address, in user space.
+ * @len: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern int __clear_user(void __user *mem, int len);
+
+/*
+ * clear_user: - Zero a block of memory in user space.
+ * @mem: Destination address, in user space.
+ * @len: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+extern int clear_user(void __user *mem, int len);
+
+/*
+ * strnlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ * @len: The maximum valid length
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ * If the string is too long, returns a value greater than @len.
+ */
+extern int strnlen_user(const void __user *str, int len);
+
+#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
+
+#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+#define __get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
+ __typeof__(x) __private_val; \
+ int __private_ret = -EFAULT; \
+ (x) = (__typeof__(*(__private_ptr)))0; \
+ if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
+ sizeof(*(__private_ptr))) == 0) { \
+ (x) = (__typeof__(*(__private_ptr))) __private_val; \
+ __private_ret = 0; \
+ } \
+ __private_ret; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
+ (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
+ __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
+})
+
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
+ __typeof__(*(__private_ptr)) __private_val; \
+ int __private_ret = -EFAULT; \
+ __private_val = (__typeof__(*(__private_ptr))) (x); \
+ if (__copy_to_user((__private_ptr), &__private_val, \
+ sizeof(*(__private_ptr))) == 0) { \
+ __private_ret = 0; \
+ } \
+ __private_ret; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *private_ptr = (ptr); \
+ (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
+ __put_user(x, private_ptr) : -EFAULT); \
+})
+
+#define strlen_user(str) strnlen_user(str, ~0U >> 1)
+
+struct exception_table_entry
+{
+ unsigned long insn;
+ unsigned long fixup;
+};
+
+#endif
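Since access_ok() is folded into the checked get_user()/put_user() forms, a hypothetical syscall can use them directly on raw user pointers (example_syscall() is illustrative, not part of the patch):

	static long example_syscall(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))	/* -EFAULT on a bad pointer */
			return -EFAULT;
		val++;
		return put_user(val, uptr);
	}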