<?php

declare(strict_types=1);

namespace OCA\Files_Sharing;

use OCP\AppFramework\Db\TTransactional;
use OCP\AppFramework\Utility\ITimeFactory;
use OCP\BackgroundJob\TimedJob;
use OCP\DB\QueryBuilder\IQueryBuilder;
use OCP\IDBConnection;
use PDO;
use Psr\Log\LoggerInterface;

/**
 * Delete all share entries that have no matching entries in the file cache table.
 */
class DeleteOrphanedSharesJob extends TimedJob {
	use TTransactional;

	private const CHUNK_SIZE = 1000;

	private const INTERVAL = 24 * 60 * 60; // 1 day

	private IDBConnection $db;

	private LoggerInterface $logger;

	public function __construct(
		ITimeFactory $time,
		IDBConnection $db,
		LoggerInterface $logger
	) {
		parent::__construct($time);

		$this->db = $db;
		$this->setInterval(self::INTERVAL); // 1 day
		$this->setTimeSensitivity(self::TIME_INSENSITIVE);
		$this->logger = $logger;
	}

	/**
	 * Makes the background job do its work
	 *
	 * @param array $argument unused argument
	 */
	public function run($argument) {
		// SELECT id FROM *PREFIX*share s
		//   LEFT JOIN *PREFIX*filecache fc ON s.file_source = fc.fileid
		//   WHERE fc.fileid IS NULL
		//   LIMIT CHUNK_SIZE
		$qbSelect = $this->db->getQueryBuilder();
		$qbSelect->select('id')
			->from('share', 's')
			->leftJoin('s', 'filecache', 'fc', $qbSelect->expr()->eq('s.file_source', 'fc.fileid'))
			->where($qbSelect->expr()->isNull('fc.fileid'))
			->setMaxResults(self::CHUNK_SIZE);

		// DELETE FROM *PREFIX*share WHERE id IN (:ids)
		$deleteQb = $this->db->getQueryBuilder();
		$deleteQb->delete('share')
			->where(
				$deleteQb->expr()->in('id', $deleteQb->createParameter('ids'), IQueryBuilder::PARAM_INT_ARRAY)
			);

		/**
		 * Read a chunk of orphan rows and delete them. Continue as long as each
		 * chunk comes back full and the time before the next cron run has not
		 * run out.
		 *
		 * Note: With isolation level READ COMMITTED, the database will allow
		 * other transactions to delete rows between our SELECT and DELETE. In
		 * that (unlikely) case, our DELETE will affect fewer rows than the
		 * number of IDs passed to the WHERE IN. If this happens while
		 * processing a full chunk, the loop below will stop prematurely.
		 *
		 * Note: The queries below are optimized for low database locking. They
		 * could be combined into a single DELETE with a join or sub query, but
		 * that has proven to (dead)lock frequently.
		 */
		$cutOff = $this->time->getTime() + self::INTERVAL;
		do {
			// Run SELECT and DELETE of each chunk in one transaction.
			$deleted = $this->atomic(function () use ($qbSelect, $deleteQb) {
				$result = $qbSelect->executeQuery();
				$ids = array_map('intval', $result->fetchAll(PDO::FETCH_COLUMN));
				$result->closeCursor();

				$deleteQb->setParameter('ids', $ids, IQueryBuilder::PARAM_INT_ARRAY);
				$deleted = $deleteQb->executeStatement();
				$this->logger->debug("{deleted} orphaned share(s) deleted", [
					'app' => 'DeleteOrphanedSharesJob',
					'deleted' => $deleted,
				]);
				return $deleted;
			}, $this->db);
		} while ($deleted >= self::CHUNK_SIZE && $this->time->getTime() <= $cutOff);
	}
}
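
/*
 * Usage sketch (not part of the file above; shown as an assumption): a
 * TimedJob like this is typically queued once, e.g. during app installation
 * or in a repair step, and cron then executes it at most every INTERVAL
 * seconds. IJobList::add() is the standard Nextcloud API for this; where
 * exactly it is called for this job is a hypothetical here.
 *
 *   // e.g. in a repair step, with $jobList being an injected
 *   // \OCP\BackgroundJob\IJobList:
 *   $jobList->add(DeleteOrphanedSharesJob::class);
 */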