/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */
  31. #include <limits.h>
  32. #include "u.h"
  33. #include "../port/lib.h"
  34. #include "mem.h"
  35. #include "dat.h"
  36. #include "../port/portfns.h"
  37. #include <ufs/ufsdat.h>
  38. #include <ufs/freebsd_util.h>
  39. #include "ufs_ext.h"
  40. #include "ufs_mountpoint.h"
  41. #include "ufs_harvey.h"
  42. //#include "dir.h"
  43. //#include "extattr.h"
  44. #include "ufs/quota.h"
  45. #include "ufs/inode.h"
  46. #include "softdep.h"
  47. #include "ufs/dinode.h"
  48. #include "ufs_extern.h"
  49. #include "ufs/fs.h"
  50. #include "ffs_extern.h"
  51. #include "ufs/ufsmount.h"
  52. //static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
  53. static int ffs_mountfs(vnode *, MountPoint *, thread *);
  54. static void ffs_ifree(ufsmount *ump, inode *ip);
  55. #if 0
  56. static int ffs_sync_lazy(struct mount *mp);
  57. static vfs_init_t ffs_init;
  58. static vfs_uninit_t ffs_uninit;
  59. static vfs_extattrctl_t ffs_extattrctl;
  60. static vfs_cmount_t ffs_cmount;
  61. static vfs_unmount_t ffs_unmount;
  62. static vfs_mount_t ffs_mount;
  63. static vfs_statfs_t ffs_statfs;
  64. static vfs_fhtovp_t ffs_fhtovp;
  65. static vfs_sync_t ffs_sync;
  66. VFS_SET(ufs_vfsops, ufs, 0);
  67. MODULE_VERSION(ufs, 1);
  68. static b_strategy_t ffs_geom_strategy;
  69. static b_write_t ffs_bufwrite;
  70. /*
  71. * Note that userquota and groupquota options are not currently used
  72. * by UFS/FFS code and generally mount(8) does not pass those options
  73. * from userland, but they can be passed by loader(8) via
  74. * vfs.root.mountfrom.options.
  75. */
  76. static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
  77. "noclusterw", "noexec", "export", "force", "from", "groupquota",
  78. "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
  79. "nosymfollow", "sync", "union", "userquota", nil };
  80. #endif // 0
  81. int
  82. ffs_mount(MountPoint *mp)
  83. {
  84. vnode *devvp = nil;
  85. thread *td = nil;
  86. int error;
  87. #if 0
  88. struct ufsmount *ump = nil;
  89. struct fs *fs;
  90. pid_t fsckpid = 0;
  91. int error, error1, flags;
  92. uint64_t mntorflags;
  93. accmode_t accmode;
  94. struct nameidata ndp;
  95. char *fspec;
  96. td = curthread;
  97. if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
  98. return (EINVAL);
  99. if (uma_inode == nil) {
  100. uma_inode = uma_zcreate("FFS inode",
  101. sizeof(struct inode), nil, nil, nil, nil,
  102. UMA_ALIGN_PTR, 0);
  103. uma_ufs1 = uma_zcreate("FFS1 dinode",
  104. sizeof(struct ufs1_dinode), nil, nil, nil, nil,
  105. UMA_ALIGN_PTR, 0);
  106. uma_ufs2 = uma_zcreate("FFS2 dinode",
  107. sizeof(struct ufs2_dinode), nil, nil, nil, nil,
  108. UMA_ALIGN_PTR, 0);
  109. }
  110. vfs_deleteopt(mp->mnt_optnew, "groupquota");
  111. vfs_deleteopt(mp->mnt_optnew, "userquota");
  112. fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
  113. if (error)
  114. return (error);
  115. mntorflags = 0;
  116. if (vfs_getopt(mp->mnt_optnew, "acls", nil, nil) == 0)
  117. mntorflags |= MNT_ACLS;
  118. if (vfs_getopt(mp->mnt_optnew, "snapshot", nil, nil) == 0) {
  119. mntorflags |= MNT_SNAPSHOT;
  120. /*
  121. * Once we have set the MNT_SNAPSHOT flag, do not
  122. * persist "snapshot" in the options list.
  123. */
  124. vfs_deleteopt(mp->mnt_optnew, "snapshot");
  125. vfs_deleteopt(mp->mnt_opt, "snapshot");
  126. }
  127. if (vfs_getopt(mp->mnt_optnew, "fsckpid", nil, nil) == 0 &&
  128. vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
  129. /*
  130. * Once we have set the restricted PID, do not
  131. * persist "fsckpid" in the options list.
  132. */
  133. vfs_deleteopt(mp->mnt_optnew, "fsckpid");
  134. vfs_deleteopt(mp->mnt_opt, "fsckpid");
  135. if (mp->mnt_flag & MNT_UPDATE) {
  136. if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
  137. vfs_flagopt(mp->mnt_optnew, "ro", nil, 0) == 0) {
  138. vfs_mount_error(mp,
  139. "Checker enable: Must be read-only");
  140. return (EINVAL);
  141. }
  142. } else if (vfs_flagopt(mp->mnt_optnew, "ro", nil, 0) == 0) {
  143. vfs_mount_error(mp,
  144. "Checker enable: Must be read-only");
  145. return (EINVAL);
  146. }
  147. /* Set to -1 if we are done */
  148. if (fsckpid == 0)
  149. fsckpid = -1;
  150. }
  151. if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", nil, nil) == 0) {
  152. if (mntorflags & MNT_ACLS) {
  153. vfs_mount_error(mp,
  154. "\"acls\" and \"nfsv4acls\" options "
  155. "are mutually exclusive");
  156. return (EINVAL);
  157. }
  158. mntorflags |= MNT_NFS4ACLS;
  159. }
  160. MNT_ILOCK(mp);
  161. mp->mnt_flag |= mntorflags;
  162. MNT_IUNLOCK(mp);
  163. /*
  164. * If updating, check whether changing from read-only to
  165. * read/write; if there is no device name, that's all we do.
  166. */
  167. if (mp->mnt_flag & MNT_UPDATE) {
  168. ump = VFSTOUFS(mp);
  169. fs = ump->um_fs;
  170. devvp = ump->um_devvp;
  171. if (fsckpid == -1 && ump->um_fsckpid > 0) {
  172. if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
  173. (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
  174. return (error);
  175. g_topology_lock();
  176. /*
  177. * Return to normal read-only mode.
  178. */
  179. error = g_access(ump->um_cp, 0, -1, 0);
  180. g_topology_unlock();
  181. ump->um_fsckpid = 0;
  182. }
  183. if (fs->fs_ronly == 0 &&
  184. vfs_flagopt(mp->mnt_optnew, "ro", nil, 0)) {
  185. /*
  186. * Flush any dirty data and suspend filesystem.
  187. */
  188. if ((error = vn_start_write(nil, &mp, V_WAIT)) != 0)
  189. return (error);
  190. error = vfs_write_suspend_umnt(mp);
  191. if (error != 0)
  192. return (error);
  193. /*
  194. * Check for and optionally get rid of files open
  195. * for writing.
  196. */
  197. flags = WRITECLOSE;
  198. if (mp->mnt_flag & MNT_FORCE)
  199. flags |= FORCECLOSE;
  200. if (MOUNTEDSOFTDEP(mp)) {
  201. error = softdep_flushfiles(mp, flags, td);
  202. } else {
  203. error = ffs_flushfiles(mp, flags, td);
  204. }
  205. if (error) {
  206. vfs_write_resume(mp, 0);
  207. return (error);
  208. }
  209. if (fs->fs_pendingblocks != 0 ||
  210. fs->fs_pendinginodes != 0) {
  211. printf("WARNING: %s Update error: blocks %jd "
  212. "files %d\n", fs->fs_fsmnt,
  213. (intmax_t)fs->fs_pendingblocks,
  214. fs->fs_pendinginodes);
  215. fs->fs_pendingblocks = 0;
  216. fs->fs_pendinginodes = 0;
  217. }
  218. if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
  219. fs->fs_clean = 1;
  220. if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
  221. fs->fs_ronly = 0;
  222. fs->fs_clean = 0;
  223. vfs_write_resume(mp, 0);
  224. return (error);
  225. }
  226. if (MOUNTEDSOFTDEP(mp))
  227. softdep_unmount(mp);
  228. g_topology_lock();
  229. /*
  230. * Drop our write and exclusive access.
  231. */
  232. g_access(ump->um_cp, 0, -1, -1);
  233. g_topology_unlock();
  234. fs->fs_ronly = 1;
  235. MNT_ILOCK(mp);
  236. mp->mnt_flag |= MNT_RDONLY;
  237. MNT_IUNLOCK(mp);
  238. /*
  239. * Allow the writers to note that filesystem
  240. * is ro now.
  241. */
  242. vfs_write_resume(mp, 0);
  243. }
  244. if ((mp->mnt_flag & MNT_RELOAD) &&
  245. (error = ffs_reload(mp, td, 0)) != 0)
  246. return (error);
  247. if (fs->fs_ronly &&
  248. !vfs_flagopt(mp->mnt_optnew, "ro", nil, 0)) {
  249. /*
  250. * If we are running a checker, do not allow upgrade.
  251. */
  252. if (ump->um_fsckpid > 0) {
  253. vfs_mount_error(mp,
  254. "Active checker, cannot upgrade to write");
  255. return (EINVAL);
  256. }
  257. /*
  258. * If upgrade to read-write by non-root, then verify
  259. * that user has necessary permissions on the device.
  260. */
  261. vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
  262. error = VOP_ACCESS(devvp, VREAD | VWRITE,
  263. td->td_ucred, td);
  264. if (error)
  265. error = priv_check(td, PRIV_VFS_MOUNT_PERM);
  266. if (error) {
  267. VOP_UNLOCK(devvp, 0);
  268. return (error);
  269. }
  270. VOP_UNLOCK(devvp, 0);
  271. fs->fs_flags &= ~FS_UNCLEAN;
  272. if (fs->fs_clean == 0) {
  273. fs->fs_flags |= FS_UNCLEAN;
  274. if ((mp->mnt_flag & MNT_FORCE) ||
  275. ((fs->fs_flags &
  276. (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
  277. (fs->fs_flags & FS_DOSOFTDEP))) {
  278. printf("WARNING: %s was not properly "
  279. "dismounted\n", fs->fs_fsmnt);
  280. } else {
  281. vfs_mount_error(mp,
  282. "R/W mount of %s denied. %s.%s",
  283. fs->fs_fsmnt,
  284. "Filesystem is not clean - run fsck",
  285. (fs->fs_flags & FS_SUJ) == 0 ? "" :
  286. " Forced mount will invalidate"
  287. " journal contents");
  288. return (EPERM);
  289. }
  290. }
  291. g_topology_lock();
  292. /*
  293. * Request exclusive write access.
  294. */
  295. error = g_access(ump->um_cp, 0, 1, 1);
  296. g_topology_unlock();
  297. if (error)
  298. return (error);
  299. if ((error = vn_start_write(nil, &mp, V_WAIT)) != 0)
  300. return (error);
  301. fs->fs_ronly = 0;
  302. MNT_ILOCK(mp);
  303. mp->mnt_flag &= ~MNT_RDONLY;
  304. MNT_IUNLOCK(mp);
  305. fs->fs_mtime = time_second;
  306. /* check to see if we need to start softdep */
  307. if ((fs->fs_flags & FS_DOSOFTDEP) &&
  308. (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
  309. vn_finished_write(mp);
  310. return (error);
  311. }
  312. fs->fs_clean = 0;
  313. if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
  314. vn_finished_write(mp);
  315. return (error);
  316. }
  317. if (fs->fs_snapinum[0] != 0)
  318. ffs_snapshot_mount(mp);
  319. vn_finished_write(mp);
  320. }
  321. /*
  322. * Soft updates is incompatible with "async",
  323. * so if we are doing softupdates stop the user
  324. * from setting the async flag in an update.
  325. * Softdep_mount() clears it in an initial mount
  326. * or ro->rw remount.
  327. */
  328. if (MOUNTEDSOFTDEP(mp)) {
  329. /* XXX: Reset too late ? */
  330. MNT_ILOCK(mp);
  331. mp->mnt_flag &= ~MNT_ASYNC;
  332. MNT_IUNLOCK(mp);
  333. }
  334. /*
  335. * Keep MNT_ACLS flag if it is stored in superblock.
  336. */
  337. if ((fs->fs_flags & FS_ACLS) != 0) {
  338. /* XXX: Set too late ? */
  339. MNT_ILOCK(mp);
  340. mp->mnt_flag |= MNT_ACLS;
  341. MNT_IUNLOCK(mp);
  342. }
  343. if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
  344. /* XXX: Set too late ? */
  345. MNT_ILOCK(mp);
  346. mp->mnt_flag |= MNT_NFS4ACLS;
  347. MNT_IUNLOCK(mp);
  348. }
  349. /*
  350. * If this is a request from fsck to clean up the filesystem,
  351. * then allow the specified pid to proceed.
  352. */
  353. if (fsckpid > 0) {
  354. if (ump->um_fsckpid != 0) {
  355. vfs_mount_error(mp,
  356. "Active checker already running on %s",
  357. fs->fs_fsmnt);
  358. return (EINVAL);
  359. }
  360. KASSERT(MOUNTEDSOFTDEP(mp) == 0,
  361. ("soft updates enabled on read-only file system"));
  362. g_topology_lock();
  363. /*
  364. * Request write access.
  365. */
  366. error = g_access(ump->um_cp, 0, 1, 0);
  367. g_topology_unlock();
  368. if (error) {
  369. vfs_mount_error(mp,
  370. "Checker activation failed on %s",
  371. fs->fs_fsmnt);
  372. return (error);
  373. }
  374. ump->um_fsckpid = fsckpid;
  375. if (fs->fs_snapinum[0] != 0)
  376. ffs_snapshot_mount(mp);
  377. fs->fs_mtime = time_second;
  378. fs->fs_fmod = 1;
  379. fs->fs_clean = 0;
  380. (void) ffs_sbupdate(ump, MNT_WAIT, 0);
  381. }
  382. /*
  383. * If this is a snapshot request, take the snapshot.
  384. */
  385. if (mp->mnt_flag & MNT_SNAPSHOT)
  386. return (ffs_snapshot(mp, fspec));
  387. /*
  388. * Must not call namei() while owning busy ref.
  389. */
  390. vfs_unbusy(mp);
  391. }
  392. /*
  393. * Not an update, or updating the name: look up the name
  394. * and verify that it refers to a sensible disk device.
  395. */
  396. NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
  397. error = namei(&ndp);
  398. if ((mp->mnt_flag & MNT_UPDATE) != 0) {
  399. /*
  400. * Unmount does not start if MNT_UPDATE is set. Mount
  401. * update busies mp before setting MNT_UPDATE. We
  402. * must be able to retain our busy ref succesfully,
  403. * without sleep.
  404. */
  405. error1 = vfs_busy(mp, MBF_NOWAIT);
  406. MPASS(error1 == 0);
  407. }
  408. if (error != 0)
  409. return (error);
  410. NDFREE(&ndp, NDF_ONLY_PNBUF);
  411. devvp = ndp.ni_vp;
  412. if (!vn_isdisk(devvp, &error)) {
  413. vput(devvp);
  414. return (error);
  415. }
  416. /*
  417. * If mount by non-root, then verify that user has necessary
  418. * permissions on the device.
  419. */
  420. accmode = VREAD;
  421. if ((mp->mnt_flag & MNT_RDONLY) == 0)
  422. accmode |= VWRITE;
  423. error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
  424. if (error)
  425. error = priv_check(td, PRIV_VFS_MOUNT_PERM);
  426. if (error) {
  427. vput(devvp);
  428. return (error);
  429. }
  430. if (mp->mnt_flag & MNT_UPDATE) {
  431. /*
  432. * Update only
  433. *
  434. * If it's not the same vnode, or at least the same device
  435. * then it's not correct.
  436. */
  437. if (devvp->v_rdev != ump->um_devvp->v_rdev)
  438. error = EINVAL; /* needs translation */
  439. vput(devvp);
  440. if (error)
  441. return (error);
  442. } else {
  443. #endif // 0
  444. /*
  445. * New mount
  446. *
  447. * We need the name for the mount point (also used for
  448. * "last mounted on") copied in. If an error occurs,
  449. * the mount point is discarded by the upper level code.
  450. * Note that vfs_mount_alloc() populates f_mntonname for us.
  451. */
  452. if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
  453. return (error);
  454. }
  455. #if 0
  456. if (fsckpid > 0) {
  457. KASSERT(MOUNTEDSOFTDEP(mp) == 0,
  458. ("soft updates enabled on read-only file system"));
  459. ump = VFSTOUFS(mp);
  460. fs = ump->um_fs;
  461. g_topology_lock();
  462. /*
  463. * Request write access.
  464. */
  465. error = g_access(ump->um_cp, 0, 1, 0);
  466. g_topology_unlock();
  467. if (error) {
  468. printf("WARNING: %s: Checker activation "
  469. "failed\n", fs->fs_fsmnt);
  470. } else {
  471. ump->um_fsckpid = fsckpid;
  472. if (fs->fs_snapinum[0] != 0)
  473. ffs_snapshot_mount(mp);
  474. fs->fs_mtime = time_second;
  475. fs->fs_clean = 0;
  476. (void) ffs_sbupdate(ump, MNT_WAIT, 0);
  477. }
  478. }
  479. }
  480. vfs_mountedfrom(mp, fspec);
  481. #endif // 0
  482. return (0);
  483. }
  484. #if 0
  485. /*
  486. * Compatibility with old mount system call.
  487. */
  488. static int
  489. ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
  490. {
  491. struct ufs_args args;
  492. struct export_args exp;
  493. int error;
  494. if (data == nil)
  495. return (EINVAL);
  496. error = copyin(data, &args, sizeof args);
  497. if (error)
  498. return (error);
  499. vfs_oexport_conv(&args.export, &exp);
  500. ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
  501. ma = mount_arg(ma, "export", &exp, sizeof(exp));
  502. error = kernel_mount(ma, flags);
  503. return (error);
  504. }
  505. /*
  506. * Reload all incore data for a filesystem (used after running fsck on
  507. * the root filesystem and finding things to fix). If the 'force' flag
  508. * is 0, the filesystem must be mounted read-only.
  509. *
  510. * Things to do to update the mount:
  511. * 1) invalidate all cached meta-data.
  512. * 2) re-read superblock from disk.
  513. * 3) re-read summary information from disk.
  514. * 4) invalidate all inactive vnodes.
  515. * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
  516. * writers, if requested.
  517. * 6) invalidate all cached file data.
  518. * 7) re-read inode data for all active vnodes.
  519. */
  520. int
  521. ffs_reload(struct mount *mp, struct thread *td, int flags)
  522. {
  523. struct vnode *vp, *mvp, *devvp;
  524. struct inode *ip;
  525. void *space;
  526. struct buf *bp;
  527. struct fs *fs, *newfs;
  528. struct ufsmount *ump;
  529. ufs2_daddr_t sblockloc;
  530. int i, blks, error;
  531. uint64_t size;
  532. int32_t *lp;
  533. ump = VFSTOUFS(mp);
  534. MNT_ILOCK(mp);
  535. if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
  536. MNT_IUNLOCK(mp);
  537. return (EINVAL);
  538. }
  539. MNT_IUNLOCK(mp);
  540. /*
  541. * Step 1: invalidate all cached meta-data.
  542. */
  543. devvp = VFSTOUFS(mp)->um_devvp;
  544. vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
  545. if (vinvalbuf(devvp, 0, 0, 0) != 0)
  546. panic("ffs_reload: dirty1");
  547. VOP_UNLOCK(devvp, 0);
  548. /*
  549. * Step 2: re-read superblock from disk.
  550. */
  551. fs = VFSTOUFS(mp)->um_fs;
  552. if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
  553. NOCRED, &bp)) != 0)
  554. return (error);
  555. newfs = (struct fs *)bp->b_data;
  556. if ((newfs->fs_magic != FS_UFS1_MAGIC &&
  557. newfs->fs_magic != FS_UFS2_MAGIC) ||
  558. newfs->fs_bsize > MAXBSIZE ||
  559. newfs->fs_bsize < sizeof(struct fs)) {
  560. brelse(bp);
  561. return (EIO); /* XXX needs translation */
  562. }
  563. /*
  564. * Copy pointer fields back into superblock before copying in XXX
  565. * new superblock. These should really be in the ufsmount. XXX
  566. * Note that important parameters (eg fs_ncg) are unchanged.
  567. */
  568. newfs->fs_csp = fs->fs_csp;
  569. newfs->fs_maxcluster = fs->fs_maxcluster;
  570. newfs->fs_contigdirs = fs->fs_contigdirs;
  571. newfs->fs_active = fs->fs_active;
  572. newfs->fs_ronly = fs->fs_ronly;
  573. sblockloc = fs->fs_sblockloc;
  574. bcopy(newfs, fs, (uint)fs->fs_sbsize);
  575. brelse(bp);
  576. mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
  577. UFS_LOCK(ump);
  578. if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
  579. printf("WARNING: %s: reload pending error: blocks %jd "
  580. "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
  581. fs->fs_pendinginodes);
  582. fs->fs_pendingblocks = 0;
  583. fs->fs_pendinginodes = 0;
  584. }
  585. UFS_UNLOCK(ump);
  586. /*
  587. * Step 3: re-read summary information from disk.
  588. */
  589. size = fs->fs_cssize;
  590. blks = howmany(size, fs->fs_fsize);
  591. if (fs->fs_contigsumsize > 0)
  592. size += fs->fs_ncg * sizeof(int32_t);
  593. size += fs->fs_ncg * sizeof(uint8_t);
  594. free(fs->fs_csp, M_UFSMNT);
  595. space = malloc(size, M_UFSMNT, M_WAITOK);
  596. fs->fs_csp = space;
  597. for (i = 0; i < blks; i += fs->fs_frag) {
  598. size = fs->fs_bsize;
  599. if (i + fs->fs_frag > blks)
  600. size = (blks - i) * fs->fs_fsize;
  601. error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
  602. NOCRED, &bp);
  603. if (error)
  604. return (error);
  605. bcopy(bp->b_data, space, (uint)size);
  606. space = (char *)space + size;
  607. brelse(bp);
  608. }
  609. /*
  610. * We no longer know anything about clusters per cylinder group.
  611. */
  612. if (fs->fs_contigsumsize > 0) {
  613. fs->fs_maxcluster = lp = space;
  614. for (i = 0; i < fs->fs_ncg; i++)
  615. *lp++ = fs->fs_contigsumsize;
  616. space = lp;
  617. }
  618. size = fs->fs_ncg * sizeof(uint8_t);
  619. fs->fs_contigdirs = (uint8_t *)space;
  620. bzero(fs->fs_contigdirs, size);
  621. if ((flags & FFSR_UNSUSPEND) != 0) {
  622. MNT_ILOCK(mp);
  623. mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
  624. wakeup(&mp->mnt_flag);
  625. MNT_IUNLOCK(mp);
  626. }
  627. loop:
  628. MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
  629. /*
  630. * Skip syncer vnode.
  631. */
  632. if (vp->v_type == VNON) {
  633. VI_UNLOCK(vp);
  634. continue;
  635. }
  636. /*
  637. * Step 4: invalidate all cached file data.
  638. */
  639. if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
  640. MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
  641. goto loop;
  642. }
  643. if (vinvalbuf(vp, 0, 0, 0))
  644. panic("ffs_reload: dirty2");
  645. /*
  646. * Step 5: re-read inode data for all active vnodes.
  647. */
  648. ip = VTOI(vp);
  649. error =
  650. bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
  651. (int)fs->fs_bsize, NOCRED, &bp);
  652. if (error) {
  653. VOP_UNLOCK(vp, 0);
  654. vrele(vp);
  655. MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
  656. return (error);
  657. }
  658. ffs_load_inode(bp, ip, fs, ip->i_number);
  659. ip->i_effnlink = ip->i_nlink;
  660. brelse(bp);
  661. VOP_UNLOCK(vp, 0);
  662. vrele(vp);
  663. }
  664. return (0);
  665. }
  666. #endif // 0
  667. /*
  668. * Possible superblock locations ordered from most to least likely.
  669. */
  670. static int sblock_try[] = SBLOCKSEARCH;
  671. /*
  672. * Common code for mount and mountroot
  673. */
  674. static int
  675. ffs_mountfs (vnode *devvp, MountPoint *mp, thread *td)
  676. {
  677. // TODO HARVEY - Don't need devvp, and maybe don't need td?
  678. Fs *fs;
  679. void *buf;
  680. ufsmount *ump;
  681. void *space;
  682. ufs2_daddr_t sblockloc;
  683. int error, i, blks, /*len,*/ ronly;
  684. uint64_t size;
  685. int32_t *lp;
  686. Ucred *cred;
  687. //struct g_consumer *cp;
  688. // TODO HARVEY
  689. cred = nil; // NOCRED
  690. //cred = td ? td->td_ucred : NOCRED;
  691. ump = nil;
  692. ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
  693. /*KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
  694. dev = devvp->v_rdev;
  695. if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
  696. (uintptr_t)mp) == 0) {
  697. VOP_UNLOCK(devvp, 0);
  698. return (EBUSY);
  699. }
  700. g_topology_lock();
  701. error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
  702. g_topology_unlock();
  703. if (error != 0) {
  704. atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
  705. VOP_UNLOCK(devvp, 0);
  706. return (error);
  707. }
  708. dev_ref(dev);
  709. devvp->v_bufobj.bo_ops = &ffs_ops;
  710. VOP_UNLOCK(devvp, 0);
  711. if (dev->si_iosize_max != 0)
  712. mp->mnt_iosize_max = dev->si_iosize_max;
  713. if (mp->mnt_iosize_max > MAXPHYS)
  714. mp->mnt_iosize_max = MAXPHYS;
  715. */
  716. fs = nil;
  717. sblockloc = 0;
  718. /*
  719. * Try reading the superblock in each of its possible locations.
  720. */
  721. for (i = 0; sblock_try[i] != -1; i++) {
  722. /*if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
  723. error = EINVAL;
  724. vfs_mount_error(mp,
  725. "Invalid sectorsize %d for superblock size %d",
  726. cp->provider->sectorsize, SBLOCKSIZE);
  727. goto out;
  728. }*/
  729. if (bread(mp, btodb(sblock_try[i]), SBLOCKSIZE, &buf) != 0) {
  730. free(buf);
  731. print("not found at %p\n", sblock_try[i]);
  732. error = -1;
  733. goto out;
  734. }
  735. fs = (Fs*)buf;
  736. sblockloc = sblock_try[i];
  737. if ((fs->fs_magic == FS_UFS1_MAGIC ||
  738. (fs->fs_magic == FS_UFS2_MAGIC &&
  739. (fs->fs_sblockloc == sblockloc ||
  740. (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) &&
  741. fs->fs_bsize <= MAXBSIZE &&
  742. fs->fs_bsize >= sizeof(Fs)) {
  743. // fs looks valid
  744. break;
  745. }
  746. // fs doesn't look right - free and try again
  747. free(buf);
  748. buf = nil;
  749. }
  750. if (sblock_try[i] == -1) {
  751. error = EINVAL; /* XXX needs translation */
  752. // TODO HARVEY error string?
  753. goto out;
  754. }
  755. fs->fs_fmod = 0;
  756. fs->fs_flags &= ~FS_INDEXDIRS; /* no support for directory indices */
  757. fs->fs_flags &= ~FS_UNCLEAN;
  758. if (fs->fs_clean == 0) {
  759. fs->fs_flags |= FS_UNCLEAN;
  760. if (ronly || (mp->mnt_flag & MNT_FORCE) ||
  761. ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
  762. (fs->fs_flags & FS_DOSOFTDEP))) {
  763. print("WARNING: %s was not properly dismounted\n",
  764. fs->fs_fsmnt);
  765. } else {
  766. print("R/W mount of %s denied. %s%s",
  767. fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
  768. (fs->fs_flags & FS_SUJ) == 0 ? "" :
  769. " Forced mount will invalidate journal contents");
  770. error = EPERM;
  771. goto out;
  772. }
  773. if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
  774. (mp->mnt_flag & MNT_FORCE)) {
  775. print("WARNING: %s: lost blocks %jd files %d\n",
  776. fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
  777. fs->fs_pendinginodes);
  778. fs->fs_pendingblocks = 0;
  779. fs->fs_pendinginodes = 0;
  780. }
  781. }
  782. if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
  783. print("WARNING: %s: mount pending error: blocks %jd "
  784. "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
  785. fs->fs_pendinginodes);
  786. fs->fs_pendingblocks = 0;
  787. fs->fs_pendinginodes = 0;
  788. }
  789. if ((fs->fs_flags & FS_GJOURNAL) != 0) {
  790. print("WARNING: %s: GJOURNAL flag on fs but no "
  791. "UFS_GJOURNAL support\n", fs->fs_fsmnt);
  792. }
  793. ump = (ufsmount*)smalloc(sizeof *ump);
  794. //ump->um_cp = cp;
  795. //ump->um_bo = &devvp->v_bufobj;
  796. ump->um_fs = smalloc((uint64_t)fs->fs_sbsize);
  797. if (fs->fs_magic == FS_UFS1_MAGIC) {
  798. print("WARNING: UFS1 not supported\n");
  799. // TODO HARVEY Better error message
  800. error = -1;
  801. goto out;
  802. } else {
  803. ump->um_fstype = UFS2;
  804. ump->um_balloc = ffs_balloc_ufs2;
  805. }
  806. ump->um_blkatoff = ffs_blkatoff;
  807. ump->um_truncate = ffs_truncate;
  808. ump->um_update = ffs_update;
  809. ump->um_valloc = ffs_valloc;
  810. ump->um_vfree = ffs_vfree;
  811. ump->um_ifree = ffs_ifree;
  812. ump->um_rdonly = ffs_rdonly;
  813. ump->um_snapgone = ffs_snapgone;
  814. memmove(ump->um_fs, fs, (uint)fs->fs_sbsize);
  815. free(buf);
  816. buf = nil;
  817. fs = ump->um_fs;
  818. fs->fs_ronly = ronly;
  819. size = fs->fs_cssize;
  820. blks = HOWMANY(size, fs->fs_fsize);
  821. if (fs->fs_contigsumsize > 0)
  822. size += fs->fs_ncg * sizeof(int32_t);
  823. size += fs->fs_ncg * sizeof(uint8_t);
  824. space = smalloc(size);
  825. fs->fs_csp = space;
  826. for (i = 0; i < blks; i += fs->fs_frag) {
  827. size = fs->fs_bsize;
  828. if (i + fs->fs_frag > blks)
  829. size = (blks - i) * fs->fs_fsize;
  830. if ((error = bread(mp, fsbtodb(fs, fs->fs_csaddr + i),
  831. size, &buf)) != 0) {
  832. free(fs->fs_csp);
  833. goto out;
  834. }
  835. memmove(space, buf, size);
  836. space = (char *)space + size;
  837. free(buf);
  838. }
  839. if (fs->fs_contigsumsize > 0) {
  840. fs->fs_maxcluster = lp = space;
  841. for (i = 0; i < fs->fs_ncg; i++)
  842. *lp++ = fs->fs_contigsumsize;
  843. space = lp;
  844. }
  845. size = fs->fs_ncg * sizeof(uint8_t);
  846. fs->fs_contigdirs = (uint8_t *)space;
  847. memset(fs->fs_contigdirs, 0, size);
  848. fs->fs_active = nil;
  849. mp->mnt_data = ump;
  850. mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
  851. qlock(&mp->mnt_lock);
  852. mp->mnt_flag |= MNT_LOCAL;
  853. qunlock(&mp->mnt_lock);
  854. if ((fs->fs_flags & FS_MULTILABEL) != 0) {
  855. #ifdef MAC
  856. MNT_ILOCK(mp);
  857. mp->mnt_flag |= MNT_MULTILABEL;
  858. MNT_IUNLOCK(mp);
  859. #else
  860. print("WARNING: %s: multilabel flag on fs but "
  861. "no MAC support\n", fs->fs_fsmnt);
  862. #endif
  863. }
  864. if ((fs->fs_flags & FS_ACLS) != 0) {
  865. #ifdef UFS_ACL
  866. MNT_ILOCK(mp);
  867. if (mp->mnt_flag & MNT_NFS4ACLS)
  868. printf("WARNING: %s: ACLs flag on fs conflicts with "
  869. "\"nfsv4acls\" mount option; option ignored\n",
  870. mp->mnt_stat.f_mntonname);
  871. mp->mnt_flag &= ~MNT_NFS4ACLS;
  872. mp->mnt_flag |= MNT_ACLS;
  873. MNT_IUNLOCK(mp);
  874. #else
  875. print("WARNING: %s: ACLs flag on fs but no ACLs support\n",
  876. fs->fs_fsmnt);
  877. #endif
  878. }
  879. if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
  880. #ifdef UFS_ACL
  881. MNT_ILOCK(mp);
  882. if (mp->mnt_flag & MNT_ACLS)
  883. printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
  884. "with \"acls\" mount option; option ignored\n",
  885. mp->mnt_stat.f_mntonname);
  886. mp->mnt_flag &= ~MNT_ACLS;
  887. mp->mnt_flag |= MNT_NFS4ACLS;
  888. MNT_IUNLOCK(mp);
  889. #else
  890. print("WARNING: %s: NFSv4 ACLs flag on fs but no "
  891. "ACLs support\n", fs->fs_fsmnt);
  892. #endif
  893. }
  894. // TODO HARVEY TRIM support
  895. /*if ((fs->fs_flags & FS_TRIM) != 0) {
  896. len = sizeof(int);
  897. if (g_io_getattr("GEOM::candelete", cp, &len,
  898. &ump->um_candelete) == 0) {
  899. if (!ump->um_candelete)
  900. printf("WARNING: %s: TRIM flag on fs but disk "
  901. "does not support TRIM\n",
  902. mp->mnt_stat.f_mntonname);
  903. } else {
  904. printf("WARNING: %s: TRIM flag on fs but disk does "
  905. "not confirm that it supports TRIM\n",
  906. mp->mnt_stat.f_mntonname);
  907. ump->um_candelete = 0;
  908. }
  909. if (ump->um_candelete) {
  910. ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
  911. taskqueue_thread_enqueue, &ump->um_trim_tq);
  912. taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
  913. "%s trim", mp->mnt_stat.f_mntonname);
  914. }
  915. }*/
  916. ump->um_mountp = mp;
  917. //ump->um_dev = dev;
  918. //ump->um_devvp = devvp;
  919. ump->um_nindir = fs->fs_nindir;
  920. ump->um_bptrtodb = fs->fs_fsbtodb;
  921. ump->um_seqinc = fs->fs_frag;
  922. //for (i = 0; i < MAXQUOTAS; i++)
  923. // ump->um_quotas[i] = NULLVP;
  924. #ifdef UFS_EXTATTR
  925. ufs_extattr_uepm_init(&ump->um_extattr);
  926. #endif
  927. mp->mnt_stat.f_iosize = fs->fs_bsize;
  928. #if 0
  929. if (mp->mnt_flag & MNT_ROOTFS) {
  930. /*
  931. * Root mount; update timestamp in mount structure.
  932. * this will be used by the common root mount code
  933. * to update the system clock.
  934. */
  935. mp->mnt_time = fs->fs_time;
  936. }
  937. #endif // 0
  938. if (ronly == 0) {
  939. fs->fs_mtime = seconds();
  940. if ((fs->fs_flags & FS_DOSOFTDEP) &&
  941. (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
  942. free(fs->fs_csp);
  943. ffs_flushfiles(mp, FORCECLOSE, td);
  944. goto out;
  945. }
  946. if (fs->fs_snapinum[0] != 0)
  947. ffs_snapshot_mount(mp);
  948. fs->fs_fmod = 1;
  949. fs->fs_clean = 0;
  950. (void) ffs_sbupdate(ump, MNT_WAIT, 0);
  951. }
  952. #ifdef UFS_EXTATTR
  953. #ifdef UFS_EXTATTR_AUTOSTART
  954. /*
  955. *
  956. * Auto-starting does the following:
  957. * - check for /.attribute in the fs, and extattr_start if so
  958. * - for each file in .attribute, enable that file with
  959. * an attribute of the same name.
  960. * Not clear how to report errors -- probably eat them.
  961. * This would all happen while the filesystem was busy/not
  962. * available, so would effectively be "atomic".
  963. */
  964. (void) ufs_extattr_autostart(mp, td);
  965. #endif /* !UFS_EXTATTR_AUTOSTART */
  966. #endif /* !UFS_EXTATTR */
  967. return (0);
  968. out:
  969. if (buf)
  970. free(buf);
  971. /*if (cp != nil) {
  972. g_topology_lock();
  973. g_vfs_close(cp);
  974. g_topology_unlock();
  975. }*/
  976. if (ump) {
  977. /*mtx_destroy(UFS_MTX(ump));
  978. if (mp->mnt_gjprovider != nil) {
  979. free(mp->mnt_gjprovider, M_UFSMNT);
  980. mp->mnt_gjprovider = nil;
  981. }*/
  982. free(ump->um_fs);
  983. free(ump);
  984. mp->mnt_data = nil;
  985. }
  986. //atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
  987. //dev_rel(dev);
  988. return (error);
  989. }
/*
 * unmount system call (VFS unmount entry point).
 *
 * Currently a stub: logs a TODO and returns 0 without flushing or
 * releasing any mount state.  The original FreeBSD implementation is
 * preserved below under "#if 0" as a porting reference.
 *
 * mp: mount point to tear down; mntflags: MNT_* flags (MNT_FORCE etc.,
 * unused by the stub).
 */
int
ffs_unmount (MountPoint *mp, int mntflags)
{
	int error = 0;
	print("HARVEY TODO: %s\n", __func__);
#if 0
	/*
	 * Reference-only FreeBSD code; relies on kernel services (softdep,
	 * GEOM, taskqueues, write suspension) not available in this port.
	 * NOTE(review): if re-enabled, the "int error" declaration below
	 * would clash with the live "int error = 0" above - reconcile first.
	 */
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif
	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	susp = 0;
	/* Forced unmount: close files regardless, suspend writes if r/w. */
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		susp = fs->fs_ronly == 0;
	}
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && error != ENXIO)
		goto fail;
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	/* Mark the filesystem clean on disk before detaching. */
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (error && error != ENXIO) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	/* Drain and destroy the TRIM taskqueue before freeing the mount. */
	if (ump->um_trim_tq != nil) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
	}
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	vrele(ump->um_devvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != nil) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = nil;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = nil;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif
#endif // 0
	return (error);
}
/*
 * Flush out all the files in a filesystem.
 *
 * Currently a stub: logs a TODO and returns 0 without flushing anything
 * (mp, flags and td are unused).  The FreeBSD implementation is preserved
 * under "#if 0" as a porting reference.
 */
int
ffs_flushfiles (MountPoint *mp, int flags, thread *td)
{
	int error = 0;
	print("HARVEY TODO: %s\n", __func__);
#if 0
	/*
	 * Reference-only FreeBSD code.
	 * NOTE(review): if re-enabled, "int error" below would clash with
	 * the live declaration above - reconcile first.
	 */
	struct ufsmount *ump;
	int qerror, error;
	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}
		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots. The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing. Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp, 0);
#endif // 0
	return (error);
}
  1173. #if 0
/*
 * Get filesystem statistics.
 *
 * NOTE: disabled (inside the surrounding "#if 0"); FreeBSD reference
 * implementation retained for the port.  Fills *sbp from the in-core
 * superblock; free-space counters are read under the UFS mount lock.
 */
static int
ffs_statfs (struct mount *mp, struct statfs *sbp)
{
	struct ufsmount *ump;
	struct fs *fs;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	/* Pending (softdep-deferred) releases count as free space. */
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}
/*
 * True if the inode carries any timestamp/modification flag that
 * requires an on-disk update.  (Disabled: inside the surrounding
 * "#if 0" reference region.)
 */
static bool
sync_doupdate(struct inode *ip)
{
	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}
/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock. Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 *
 * NOTE: disabled (inside the surrounding "#if 0"); FreeBSD reference
 * implementation retained for the port.
 */
static int
ffs_sync_lazy (struct mount *mp)
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	struct thread *td;
	int allerror, error;
	allerror = 0;
	td = curthread;
	/* With noatime there are no timestamp updates to push. */
	if ((mp->mnt_flag & MNT_NOATIME) != 0)
		goto qupdate;
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
		    td)) != 0)
			continue;
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
qupdate:
#ifdef QUOTA
	qsync(mp);
#endif
	/* Push the superblock if it was modified. */
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 *
 * NOTE: disabled (inside the surrounding "#if 0"); FreeBSD reference
 * implementation retained for the port.
 */
static int
ffs_sync (struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;
	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}
	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);
	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test. Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}
#ifdef QUOTA
	qsync(mp);
#endif
	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp, 0);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
		    devvp,
		    softdep_deps,
		    softdep_accdeps,
		    secondary_writes,
		    secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	return (allerror);
}
  1394. #endif // 0
  1395. int
  1396. ffs_vget(MountPoint *mp, ino_t ino, int flags, vnode **vpp)
  1397. {
  1398. return (ffs_vgetf(mp, ino, flags, vpp, 0));
  1399. }
/*
 * Return (in *vpp) a vnode for inode number "ino" on mount "mp",
 * creating it and loading the on-disk UFS2 dinode if one does not
 * already exist.
 *
 * flags: LK_EXCLUSIVE or LK_SHARED selects how the new vnode's lock is
 * taken.  ffs_flags: FFSV_* flags; currently unused here (the
 * insmntque/hash paths that consumed them are commented out).
 *
 * Returns 0 with *vpp set on success; otherwise returns the error from
 * the failing helper and sets *vpp to nil.
 */
int
ffs_vgetf(MountPoint *mp, ino_t ino, int flags, vnode **vpp, int ffs_flags)
{
	Fs *fs;
	inode *ip;
	ufsmount *ump;
	void *buf;
	vnode *vp;
	int error;
	/* Reuse an existing vnode for this ino if one is already cached. */
	error = findexistingvnode(mp, ino, vpp);
	if (error || *vpp != nil)
		return (error);
	/*
	 * We must promote to an exclusive lock for vnode creation. This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	//if ((flags & LK_TYPE_MASK) == LK_SHARED) {
	//	flags &= ~LK_TYPE_MASK;
	//	flags |= LK_EXCLUSIVE;
	//}
	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes. We just allow them to race
	 * and check later to decide who wins. Let the race begin!
	 */
	ump = mp->mnt_data;
	fs = ump->um_fs;
	ip = smalloc(sizeof(inode));
	/* Allocate a new vnode/inode. */
	error = getnewvnode(mp, &vp);
	if (error) {
		*vpp = nil;
		free(ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	/* Take the vnode lock in the mode the caller asked for. */
	if (flags & LK_EXCLUSIVE) {
		wlock(&vp->vnlock);
	} else if (flags & LK_SHARED) {
		rlock(&vp->vnlock);
	}
	//VN_LOCK_AREC(vp);
	vp->data = ip;
	//vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;	/* no cached cluster CG yet */
	ip->i_flag = IN_UFS2;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	// TODO HARVEY Need to add vnode to collection in mountpoint
	//if (ffs_flags & FFSV_FORCEINSMQ)
	//	vp->v_vflag |= VV_FORCEINSMQ;
	//error = insmntque(vp, mp);
	//if (error != 0) {
	//	ufree(ip);
	//	*vpp = nil;
	//	return (error);
	//}
	// TODO HARVEY Implement hashing at some point
	//vp->v_vflag &= ~VV_FORCEINSMQ;
	//error = vfs_hash_insert(vp, ino, flags, curthread, vpp, nil, nil);
	//if (error || *vpp != nil)
	//	return (error);
	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(mp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, &buf);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		free(buf);
		releaseufsvnode(mp, vp);
		*vpp = nil;
		return (error);
	}
	ip->din2 = smalloc(sizeof(ufs2_dinode));
	ffs_load_inode(buf, ip, fs, ino);
	// TODO HARVEY SOFTDEP
	//if (DOINGSOFTDEP(vp))
	//	softdep_load_inodeblock(ip);
	//else
	ip->i_effnlink = ip->i_nlink;
	free(buf);
	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, &vp);
	if (error) {
		/*
		 * NOTE(review): ip->din2 (and possibly ip) appear to leak
		 * here unless releaseufsvnode() frees vp->data and its
		 * dinode - confirm against releaseufsvnode().
		 */
		releaseufsvnode(mp, vp);
		*vpp = nil;
		return (error);
	}
	/*
	 * Finish inode initialization.
	 */
	/* FFS supports shared locking for all files except fifos. */
	//VN_LOCK_ASHARE(vp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0) {
			// i_gen is uint64_t, but arc4random() (which was
			// previously the source here), returns uint32_t.
			// lrand() returns int32_t, which isn't ideal but it's
			// all we have right now
			ip->i_gen = lrand();
		}
		/* Persist the new generation number on writable mounts. */
		if ((vp->mount->mnt_flag & MNT_RDONLY) == 0) {
			ip->i_flag |= IN_MODIFIED;
			ip->din2->di_gen = ip->i_gen;
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vput(vp);
			*vpp = nil;
			return (error);
		}
	}
#endif
	*vpp = vp;
	return (0);
}
  1548. #if 0
/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 *
 * NOTE: disabled (inside the surrounding "#if 0"); FreeBSD reference
 * implementation retained for the port.
 */
static int
ffs_fhtovp (struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct ufid *ufhp;
	struct ufsmount *ump;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	ino_t ino;
	uint cg;
	int error;
	ufhp = (struct ufid *)fhp;
	ino = ufhp->ufid_ino;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Reject inode numbers outside the filesystem's valid range. */
	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	/*
	 * Need to check if inode is initialized because UFS2 does lazy
	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
	 */
	if (fs->fs_magic != FS_UFS2_MAGIC)
		return (ufs_fhtovp(mp, ufhp, flags, vpp));
	cg = ino_to_cg(fs, ino);
	error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		return (error);
	/* Stale if the CG is corrupt or the inode was never initialized. */
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
		brelse(bp);
		return (ESTALE);
	}
	brelse(bp);
	return (ufs_fhtovp(mp, ufhp, flags, vpp));
}
  1596. #endif // 0
  1597. /*
  1598. * Initialize the filesystem.
  1599. */
  1600. int
  1601. ffs_init ()
  1602. {
  1603. ffs_susp_initialize();
  1604. softdep_initialize();
  1605. return (ufs_init());
  1606. }
  1607. /*
  1608. * Undo the work of ffs_init().
  1609. */
  1610. int
  1611. ffs_uninit ()
  1612. {
  1613. int ret;
  1614. ret = ufs_uninit();
  1615. softdep_uninitialize();
  1616. ffs_susp_uninitialize();
  1617. return (ret);
  1618. }
/*
 * Write a superblock and associated information back to disk.
 *
 * Currently a stub: logs a TODO and returns 0 (ump, waitfor and
 * suspended are unused).  NOTE(review): live callers (e.g. the mount
 * path) therefore believe the superblock was written successfully.
 * The FreeBSD implementation is preserved under "#if 0" as a porting
 * reference.
 */
int
ffs_sbupdate (ufsmount *ump, int waitfor, int suspended)
{
	int allerror = 0;
	print("HARVEY TODO: %s\n", __func__);
#if 0
	/*
	 * Reference-only FreeBSD code.
	 * NOTE(review): if re-enabled, "allerror = 0" below would clash
	 * with the live declaration above - reconcile first.
	 */
	struct fs *fs = ump->um_fs;
	struct buf *sbbp;
	struct buf *bp;
	int blks;
	void *space;
	int i, size, error, allerror = 0;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * First write back the summary information.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0, 0);
		bcopy(space, bp->b_data, (uint)size);
		space = (char *)space + size;
		if (suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			allerror = error;
	}
	/*
	 * Now write back the superblock itself. If any errors occurred
	 * up to this point, then fail so that the superblock avoids
	 * being written out as clean.
	 */
	if (allerror) {
		brelse(sbbp);
		return (allerror);
	}
	bp = sbbp;
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	fs->fs_fmod = 0;
	fs->fs_time = time_second;
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (uint)fs->fs_sbsize);
	ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
	if (suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		allerror = error;
#endif // 0
	return (allerror);
}
  1700. #if 0
/*
 * Extended-attribute control entry point: forward to the UFS extattr
 * implementation when compiled in, otherwise to the stock VFS stub.
 * (Disabled: inside the surrounding "#if 0" reference region.)
 */
static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{
#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}
  1713. #endif // 0
/*
 * Release an in-core inode (installed as ump->um_ifree).
 *
 * Currently a stub: logs a TODO and does not free ip or its dinode.
 * NOTE(review): that means every inode released this way is leaked
 * until the port replaces the UMA-zone frees kept below for reference.
 */
static void
ffs_ifree(ufsmount *ump, inode *ip)
{
	print("HARVEY TODO: %s\n", __func__);
#if 0
	if (ump->um_fstype == UFS1 && ip->i_din1 != nil)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != nil)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree(uma_inode, ip);
#endif // 0
}
  1726. #if 0
/* Tunable: enable background (copy-on-write style) cg buffer writes. */
  1727. static int dobkgrdwrite = 1;
  1728. SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
  1729. "Do background writes (honoring the BV_BKGRDWRITE flag)?");
  1730. /*
  1731. * Complete a background write started from bwrite.
 *
 * Compiled out (#if 0).  Installed as newbp->b_iodone by ffs_bufwrite()
 * below, so it runs when the background copy finishes.  It locates the
 * original buffer via gbincore(), propagates any I/O error to it
 * (BV_BKGRDERR), moves unfinished softdep dependencies back to the
 * original, discards the copy (B_NOCACHE), and finally clears
 * BV_BKGRDINPROG / wakes any thread sleeping in ffs_bufwrite().
  1732. */
  1733. static void
  1734. ffs_backgroundwritedone(struct buf *bp)
  1735. {
  1736. struct bufobj *bufobj;
  1737. struct buf *origbp;
  1738. /*
  1739. * Find the original buffer that we are writing.
  1740. */
  1741. bufobj = bp->b_bufobj;
  1742. BO_LOCK(bufobj);
  1743. if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == nil)
  1744. panic("backgroundwritedone: lost buffer");
  1745. /*
  1746. * We should mark the cylinder group buffer origbp as
  1747. * dirty, to not loose the failed write.
  1748. */
  1749. if ((bp->b_ioflags & BIO_ERROR) != 0)
  1750. origbp->b_vflags |= BV_BKGRDERR;
  1751. BO_UNLOCK(bufobj);
  1752. /*
  1753. * Process dependencies then return any unfinished ones.
  1754. */
  1755. pbrelvp(bp);
  1756. if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
  1757. buf_complete(bp);
  1758. #ifdef SOFTUPDATES
  1759. if (!LIST_EMPTY(&bp->b_dep))
  1760. softdep_move_dependencies(bp, origbp);
  1761. #endif
  1762. /*
  1763. * This buffer is marked B_NOCACHE so when it is released
  1764. * by biodone it will be tossed.
  1765. */
  1766. bp->b_flags |= B_NOCACHE;
  1767. bp->b_flags &= ~B_CACHE;
  1768. /*
  1769. * Prevent brelse() from trying to keep and re-dirtying bp on
  1770. * errors. It causes b_bufobj dereference in
  1771. * bdirty()/reassignbuf(), and b_bufobj was cleared in
  1772. * pbrelvp() above.
  1773. */
  1774. if ((bp->b_ioflags & BIO_ERROR) != 0)
  1775. bp->b_flags |= B_INVAL;
  1776. bufdone(bp);
  1777. BO_LOCK(bufobj);
  1778. /*
  1779. * Clear the BV_BKGRDINPROG flag in the original buffer
  1780. * and awaken it if it is waiting for the write to complete.
  1781. * If BV_BKGRDINPROG is not set in the original buffer it must
  1782. * have been released and re-instantiated - which is not legal.
  1783. */
  1784. KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
  1785. ("backgroundwritedone: lost buffer2"));
  1786. origbp->b_vflags &= ~BV_BKGRDINPROG;
  1787. if (origbp->b_vflags & BV_BKGRDWAIT) {
  1788. origbp->b_vflags &= ~BV_BKGRDWAIT;
/* Sleepers wait on &bp->b_xflags (see msleep in ffs_bufwrite). */
  1789. wakeup(&origbp->b_xflags);
  1790. }
  1791. BO_UNLOCK(bufobj);
  1792. }
  1793. /*
  1794. * Write, release buffer on completion. (Done by iodone
  1795. * if async). Do not bother writing anything if the buffer
  1796. * is invalid.
  1797. *
  1798. * Note that we set B_CACHE here, indicating that buffer is
  1799. * fully valid and thus cacheable. This is true even of NFS
  1800. * now so we set it generally. This could be set either here
  1801. * or in biodone() since the I/O is synchronous. We put it
  1802. * here.
 *
 * Compiled out (#if 0).  FFS-specific replacement for bufwrite(): for
 * async BX_BKGRDWRITE buffers (and when memory pressure allows) it
 * clones the buffer and writes the copy in the background, leaving the
 * original immediately reusable; otherwise it falls through to the
 * normal bufwrite() path.  Completion of the copy is handled by
 * ffs_backgroundwritedone() above.
  1803. */
  1804. static int
  1805. ffs_bufwrite(struct buf *bp)
  1806. {
  1807. struct buf *newbp;
  1808. CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
/* Invalid buffers are simply released; nothing to write. */
  1809. if (bp->b_flags & B_INVAL) {
  1810. brelse(bp);
  1811. return (0);
  1812. }
  1813. if (!BUF_ISLOCKED(bp))
  1814. panic("bufwrite: buffer is not busy???");
  1815. /*
  1816. * If a background write is already in progress, delay
  1817. * writing this block if it is asynchronous. Otherwise
  1818. * wait for the background write to complete.
  1819. */
  1820. BO_LOCK(bp->b_bufobj);
  1821. if (bp->b_vflags & BV_BKGRDINPROG) {
  1822. if (bp->b_flags & B_ASYNC) {
  1823. BO_UNLOCK(bp->b_bufobj);
  1824. bdwrite(bp);
  1825. return (0);
  1826. }
  1827. bp->b_vflags |= BV_BKGRDWAIT;
/* Woken by ffs_backgroundwritedone() via wakeup(&origbp->b_xflags). */
  1828. msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
  1829. "bwrbg", 0);
  1830. if (bp->b_vflags & BV_BKGRDINPROG)
  1831. panic("bufwrite: still writing");
  1832. }
  1833. bp->b_vflags &= ~BV_BKGRDERR;
  1834. BO_UNLOCK(bp->b_bufobj);
  1835. /*
  1836. * If this buffer is marked for background writing and we
  1837. * do not have to wait for it, make a copy and write the
  1838. * copy so as to leave this buffer ready for further use.
  1839. *
  1840. * This optimization eats a lot of memory. If we have a page
  1841. * or buffer shortfall we can't do it.
  1842. */
  1843. if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
  1844. (bp->b_flags & B_ASYNC) &&
  1845. !vm_page_count_severe() &&
  1846. !buf_dirty_count_severe()) {
  1847. KASSERT(bp->b_iodone == nil,
  1848. ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
  1849. /* get a new block */
  1850. newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
/* Allocation may fail under pressure; fall back to a normal write. */
  1851. if (newbp == nil)
  1852. goto normal_write;
  1853. KASSERT(buf_mapped(bp), ("Unmapped cg"));
  1854. memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
  1855. BO_LOCK(bp->b_bufobj);
  1856. bp->b_vflags |= BV_BKGRDINPROG;
  1857. BO_UNLOCK(bp->b_bufobj);
  1858. newbp->b_xflags |= BX_BKGRDMARKER;
/* The copy targets the same logical/physical blocks as the original. */
  1859. newbp->b_lblkno = bp->b_lblkno;
  1860. newbp->b_blkno = bp->b_blkno;
  1861. newbp->b_offset = bp->b_offset;
  1862. newbp->b_iodone = ffs_backgroundwritedone;
  1863. newbp->b_flags |= B_ASYNC;
  1864. newbp->b_flags &= ~B_INVAL;
  1865. pbgetvp(bp->b_vp, newbp);
  1866. #ifdef SOFTUPDATES
  1867. /*
  1868. * Move over the dependencies. If there are rollbacks,
  1869. * leave the parent buffer dirtied as it will need to
  1870. * be written again.
  1871. */
  1872. if (LIST_EMPTY(&bp->b_dep) ||
  1873. softdep_move_dependencies(bp, newbp) == 0)
  1874. bundirty(bp);
  1875. #else
  1876. bundirty(bp);
  1877. #endif
  1878. /*
  1879. * Initiate write on the copy, release the original. The
  1880. * BKGRDINPROG flag prevents it from going away until
  1881. * the background write completes.
  1882. */
  1883. bqrelse(bp);
  1884. bp = newbp;
  1885. } else
  1886. /* Mark the buffer clean */
  1887. bundirty(bp);
  1888. /* Let the normal bufwrite do the rest for us */
  1889. normal_write:
  1890. return (bufwrite(bp));
  1891. }
/*
 * ffs_geom_strategy -- FFS buffer I/O strategy routine (GEOM-backed).
 *
 * Compiled out (#if 0).  For writes it: (1) panics if a write would hit
 * a suspended filesystem without B_VALIDSUSPWRT set; (2) on
 * copy-on-write vnodes (VV_COPYONWRITE with snapshot data present) runs
 * ffs_copyonwrite() on the buffer — or on each member of a B_CLUSTER —
 * failing the whole I/O via bufdone() on any error other than
 * EOPNOTSUPP; (3) with SOFTUPDATES, starts dependency processing
 * (buf_start) for each buffer with pending dependencies.  Finally the
 * I/O (read or write) is handed to g_vfs_strategy().
 */
  1892. static void
  1893. ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
  1894. {
  1895. struct vnode *vp;
  1896. int error;
  1897. struct buf *tbp;
  1898. int nocopy;
  1899. vp = bo2vnode(bo);
  1900. if (bp->b_iocmd == BIO_WRITE) {
  1901. if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
  1902. bp->b_vp != nil && bp->b_vp->v_mount != nil &&
  1903. (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
  1904. panic("ffs_geom_strategy: bad I/O");
/* Remember B_NOCOPY before clearing it: it suppresses copy-on-write. */
  1905. nocopy = bp->b_flags & B_NOCOPY;
  1906. bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
  1907. if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
  1908. vp->v_rdev->si_snapdata != nil) {
  1909. if ((bp->b_flags & B_CLUSTER) != 0) {
/* For clusters, process each constituent buffer individually. */
  1910. runningbufwakeup(bp);
  1911. TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
  1912. b_cluster.cluster_entry) {
  1913. error = ffs_copyonwrite(vp, tbp);
  1914. if (error != 0 &&
  1915. error != EOPNOTSUPP) {
  1916. bp->b_error = error;
  1917. bp->b_ioflags |= BIO_ERROR;
  1918. bufdone(bp);
  1919. return;
  1920. }
  1921. }
/* Re-account the running-buffer space dropped above. */
  1922. bp->b_runningbufspace = bp->b_bufsize;
  1923. atomic_add_long(&runningbufspace,
  1924. bp->b_runningbufspace);
  1925. } else {
  1926. error = ffs_copyonwrite(vp, bp);
  1927. if (error != 0 && error != EOPNOTSUPP) {
  1928. bp->b_error = error;
  1929. bp->b_ioflags |= BIO_ERROR;
  1930. bufdone(bp);
  1931. return;
  1932. }
  1933. }
  1934. }
  1935. #ifdef SOFTUPDATES
  1936. if ((bp->b_flags & B_CLUSTER) != 0) {
  1937. TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
  1938. b_cluster.cluster_entry) {
  1939. if (!LIST_EMPTY(&tbp->b_dep))
  1940. buf_start(tbp);
  1941. }
  1942. } else {
  1943. if (!LIST_EMPTY(&bp->b_dep))
  1944. buf_start(bp);
  1945. }
  1946. #endif
  1947. }
  1948. g_vfs_strategy(bo, bp);
  1949. }
/*
 * ffs_own_mount -- report whether a mount point belongs to this
 * filesystem.  Returns 1 if mp's operations vector is &ufs_vfsops,
 * 0 otherwise.  (Compiled out along with the enclosing #if 0 region.)
 */
  1950. int
  1951. ffs_own_mount(const struct mount *mp)
  1952. {
  1953. if (mp->mnt_op == &ufs_vfsops)
  1954. return (1);
  1955. return (0);
  1956. }
  1957. #ifdef DDB
  1958. #ifdef SOFTUPDATES
  1959. /* defined in ffs_softdep.c */
  1960. extern void db_print_ffs(struct ufsmount *ump);
/*
 * DDB "show ffs" command.  With an address argument, print state for
 * that single mount; otherwise walk mountlist and print every mount
 * whose fstype name matches ufs.  Output comes from db_print_ffs().
 * (Compiled out along with the enclosing #if 0 region.)
 */
  1961. int
  1962. DB_SHOW_COMMAND (int ffs, int db_show_ffs)
  1963. {
  1964. struct mount *mp;
  1965. struct ufsmount *ump;
  1966. if (have_addr) {
  1967. ump = VFSTOUFS((struct mount *)addr);
  1968. db_print_ffs(ump);
  1969. return;
  1970. }
  1971. TAILQ_FOREACH(mp, &mountlist, mnt_list) {
  1972. if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
  1973. db_print_ffs(VFSTOUFS(mp));
  1974. }
  1975. }
  1976. #endif /* SOFTUPDATES */
  1977. #endif /* DDB */
  1978. #endif // 0