From 1455dd2e73f68d782814b7aa7641319a45d935d4 Mon Sep 17 00:00:00 2001
From: Wannabe
Date: Wed, 21 May 2025 10:06:47 +0800
Subject: [PATCH] Backup and restore framework: optimize the backup transfer
 mechanism
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Wannabe
---
 .../native/backup_ext/include/ext_extension.h |  23 ++-
 .../native/backup_ext/src/ext_extension.cpp   |  89 ++++-----
 .../backup_ext/src/sub_ext_extension.cpp      | 175 ++++++++++++++++++
 tests/unittests/backup_utils/BUILD.gn         |  33 ++++
 .../b_utils/scan_file_singleton_test.cpp      | 149 +++++++++++++++
 utils/BUILD.gn                                |   1 +
 utils/include/b_utils/scan_file_singleton.h   |  66 +++++++
 utils/src/b_filesystem/b_dir.cpp              |  13 +-
 utils/src/b_utils/scan_file_singleton.cpp     | 107 +++++++++++
 9 files changed, 602 insertions(+), 54 deletions(-)
 create mode 100644 tests/unittests/backup_utils/b_utils/scan_file_singleton_test.cpp
 create mode 100644 utils/include/b_utils/scan_file_singleton.h
 create mode 100644 utils/src/b_utils/scan_file_singleton.cpp

diff --git a/frameworks/native/backup_ext/include/ext_extension.h b/frameworks/native/backup_ext/include/ext_extension.h
index 9d7ab287b..ff6aabe7a 100644
--- a/frameworks/native/backup_ext/include/ext_extension.h
+++ b/frameworks/native/backup_ext/include/ext_extension.h
@@ -74,6 +74,7 @@ public:
         threadPool_.Start(BConstants::EXTENSION_THREAD_POOL_COUNT);
         onProcessTaskPool_.Start(BConstants::EXTENSION_THREAD_POOL_COUNT);
         reportOnProcessRetPool_.Start(BConstants::EXTENSION_THREAD_POOL_COUNT);
+        doBackupPool_.Start(BConstants::EXTENSION_THREAD_POOL_COUNT);
         SetStagingPathProperties();
         appStatistic_ = std::make_shared();
     }
@@ -83,6 +84,7 @@ public:
         threadPool_.Stop();
         onProcessTaskPool_.Stop();
         reportOnProcessRetPool_.Stop();
+        doBackupPool_.Stop();
         if (callJsOnProcessThread_.joinable()) {
             callJsOnProcessThread_.join();
         }
@@ -99,11 +101,21 @@ private:
      * @brief backup
      *
      * @param bigFileInfo bigfiles to be backup
+     * @param bigFileInfoBackuped bigfiles have been backuped
      * @param smallFiles smallfiles to be backup
      * @param includesNum sizeof includes
      * @param excludesNum sizeof excludes
      */
-    int DoBackup(TarMap &bigFileInfo, map &smallFiles, uint32_t includesNum, uint32_t excludesNum);
+    int DoBackup(TarMap &bigFileInfo, TarMap &bigFileInfoBackuped, map &smallFiles,
+        uint32_t includesNum, uint32_t excludesNum);
+
+    /**
+     * @brief backup
+     *
+     * @param bigFileInfo bigfiles to be backup
+     * @param backupedFileSize backuped file size
+     */
+    int DoBackupBigFiles(TarMap &bigFileInfo, uint32_t backupedFileSize);

     /**
      * @brief restore
@@ -210,7 +222,7 @@
     int DoIncrementalBackupTask(UniqueFd incrementalFd, UniqueFd manifestFd);
     ErrCode IncrementalBigFileReady(TarMap &pkgInfo, const vector &bigInfos, sptr proxy);
-    ErrCode BigFileReady(TarMap &bigFileInfo, sptr proxy);
+    ErrCode BigFileReady(TarMap &bigFileInfo, sptr proxy, int backupedFileSize);
     void WaitToSendFd(std::chrono::system_clock::time_point &startTime, int &fdSendNum);
     void RefreshTimeInfo(std::chrono::system_clock::time_point &startTime, int &fdSendNum);
     void IncrementalPacket(const vector &infos, TarMap &tar, sptr proxy);
@@ -362,6 +374,12 @@
     void OnRestoreExFinish();
     void DoBackupStart();
     void DoBackupEnd();
+    void CalculateDataSizeTask(const string &config);
+    void DoBackUpTask(const string &config);
+    TarMap convertFileToBigFiles(std::map files);
+    void PreDealExcludes(std::vector
&excludes); + template + map MatchFiles(map files, vector endExcludes); private: pair> GetFileInfos(const vector &includes, const vector &excludes); void ReportAppStatistic(ErrCode errCode); @@ -399,6 +417,7 @@ private: std::atomic isFirstCallOnProcess_ {false}; std::atomic isExecAppDone_ {false}; OHOS::ThreadPool reportOnProcessRetPool_; + OHOS::ThreadPool doBackupPool_; std::mutex reportHashLock_; std::map reportHashSrcPathMap_; diff --git a/frameworks/native/backup_ext/src/ext_extension.cpp b/frameworks/native/backup_ext/src/ext_extension.cpp index 849fb2b59..d088e449b 100644 --- a/frameworks/native/backup_ext/src/ext_extension.cpp +++ b/frameworks/native/backup_ext/src/ext_extension.cpp @@ -52,6 +52,7 @@ #include "b_tarball/b_tarball_factory.h" #include "b_hiaudit/hi_audit.h" #include "b_utils/b_time.h" +#include "b_utils/scan_file_singleton.h" #include "filemgmt_libhilog.h" #include "hitrace_meter.h" #include "installd_un_tar_file.h" @@ -498,13 +499,14 @@ void BackupExtExtension::ClearNoPermissionFiles(TarMap &pkgInfo, vector proxy) +ErrCode BackupExtExtension::BigFileReady(TarMap &bigFileInfo, sptr proxy, int backupedFileSize) { HITRACE_METER_NAME(HITRACE_TAG_FILEMANAGEMENT, __PRETTY_FUNCTION__); - HILOGI("BigFileReady Begin: bigFileInfo file size is: %{public}zu", bigFileInfo.size()); + HILOGI("BigFileReady Begin: bigFileInfo file size is: %{public}zu, backupedFileSize is %{public}d", + bigFileInfo.size(), backupedFileSize); ErrCode ret {ERR_OK}; auto startTime = std::chrono::system_clock::now(); - int fdNum = 0; + int fdNum = backupedFileSize; vector noPermissionFiles; for (auto &item : bigFileInfo) { WaitToSendFd(startTime, fdNum); @@ -530,7 +532,7 @@ ErrCode BackupExtExtension::BigFileReady(TarMap &bigFileInfo, sptr pro RefreshTimeInfo(startTime, fdNum); } ClearNoPermissionFiles(bigFileInfo, noPermissionFiles); - HILOGI("BigFileReady End"); + HILOGI("BigFileReady End, fdNum is %{public}d", fdNum); return ret; } @@ -777,11 +779,12 @@ void BackupExtExtension::DoPacket(const map &srcFiles, TarMap &t appStatistic_->tarSpend_ = totalTarUs / MS_TO_US; } -int BackupExtExtension::DoBackup(TarMap &bigFileInfo, map &smallFiles, +int BackupExtExtension::DoBackup(TarMap &bigFileInfo, TarMap &fileBackupedInfo, map &smallFiles, uint32_t includesNum, uint32_t excludesNum) { HITRACE_METER_NAME(HITRACE_TAG_FILEMANAGEMENT, __PRETTY_FUNCTION__); - HILOGI("Start Do backup"); + HILOGI("bigFileInfo size: %{public}zu, fileBackupedInfo size %{public}zu, smallFiles size: %{public}zu", + bigFileInfo.size(), fileBackupedInfo.size(), smallFiles.size()); auto start = std::chrono::system_clock::now(); if (extension_ == nullptr) { HILOGE("Failed to do backup, extension is nullptr"); @@ -798,24 +801,25 @@ int BackupExtExtension::DoBackup(TarMap &bigFileInfo, map &small // 回传大文件 HILOGI("Will notify BigFileReady"); - auto res = BigFileReady(bigFileInfo, proxy); + auto res = BigFileReady(bigFileInfo, proxy, fileBackupedInfo.size()); HILOGI("Start packet Tar files"); // 分片打包, 回传tar包 TarMap tarMap {}; DoPacket(smallFiles, tarMap, proxy); bigFileInfo.insert(tarMap.begin(), tarMap.end()); + fileBackupedInfo.insert(bigFileInfo.begin(), bigFileInfo.end()); HILOGI("Do backup, DoPacket end"); HILOGI("Will notify IndexFileReady"); - if (auto ret = IndexFileReady(bigFileInfo, proxy); ret) { + if (auto ret = IndexFileReady(fileBackupedInfo, proxy); ret) { return ret; } HILOGI("HandleBackup finish, ret = %{public}d", res); auto end = std::chrono::system_clock::now(); auto cost = std::chrono::duration_cast(end - 
start).count(); - AppRadar::DoBackupInfo doBackupInfo = {cost, bigFileInfo.size(), smallFiles.size(), tarMap.size(), + AppRadar::DoBackupInfo doBackupInfo = {cost, fileBackupedInfo.size(), smallFiles.size(), tarMap.size(), includesNum, excludesNum}; RecordDoBackupRes(bundleName_, res, doBackupInfo); return res; @@ -836,6 +840,7 @@ tuple BackupExtExtension::CalculateDataSize(const B // 扫描文件计算数据量 tie(bigFileInfo, smallFiles) = GetFileInfos(includes, excludes); + ScanFileSingleton::GetInstance().SetCompeletedFlag(true); appStatistic_->smallFileCount_ = smallFiles.size(); appStatistic_->bigFileCount_ = bigFileInfo.size(); for (const auto &item : bigFileInfo) { @@ -1020,37 +1025,14 @@ void BackupExtExtension::AsyncTaskBackup(const string config) auto ptr = obj.promote(); BExcepUltils::BAssert(ptr, BError::Codes::EXT_BROKEN_FRAMEWORK, "Ext extension handle have been released"); try { - if (!ptr->StopExtTimer()) { - throw BError(BError::Codes::EXT_TIMER_ERROR, "Failed to stop extTimer"); - } - int64_t totalSize = 0; - TarMap bigFileInfo; - map smallFiles; - BJsonCachedEntity cachedEntity(config); - auto cache = cachedEntity.Structuralize(); - ptr->DoBackupStart(); - auto [err, includeSize, excludeSize] = ptr->CalculateDataSize(cache, totalSize, bigFileInfo, smallFiles); - if (err != ERR_OK) { - throw BError(BError::Codes::EXT_INVAL_ARG, "Failed to mkdir"); - } - if (!ptr->RefreshDataSize(totalSize)) { - throw BError(BError::Codes::EXT_INVAL_ARG, "Failed to RefreshDataSize"); - } - bool isFwkStart; - ptr->StartFwkTimer(isFwkStart); - if (!isFwkStart) { - HILOGE("Do backup, start fwk timer fail."); - throw BError(BError::Codes::EXT_TIMER_ERROR, "Failed to start fwkTimer"); - } - auto ret = ptr->DoBackup(bigFileInfo, smallFiles, includeSize, excludeSize); - ptr->DoBackupEnd(); - ptr->AppDone(ret); - HILOGI("backup app done %{public}d", ret); + ptr->CalculateDataSizeTask(config); } catch (const BError &e) { HILOGE("extension: AsyncTaskBackup error, err code:%{public}d", e.GetCode()); + ScanFileSingleton::GetInstance().SetCompeletedFlag(true); ptr->AppDone(e.GetCode()); } catch (...) { HILOGE("Failed to restore the ext bundle"); + ScanFileSingleton::GetInstance().SetCompeletedFlag(true); ptr->AppDone(BError(BError::Codes::EXT_INVAL_ARG).GetCode()); } ptr->DoClear(); @@ -1066,6 +1048,29 @@ void BackupExtExtension::AsyncTaskBackup(const string config) HILOGE("Failed to add task to thread pool"); } }); + + auto dobackupTask = [obj {wptr(this)}, config]() { + auto ptr = obj.promote(); + BExcepUltils::BAssert(ptr, BError::Codes::EXT_BROKEN_FRAMEWORK, "Ext extension handle have been released"); + try { + ptr->DoBackUpTask(config); + } catch (const BError &e) { + HILOGE("extension: AsyncTaskBackup error, err code:%{public}d", e.GetCode()); + ptr->AppDone(e.GetCode()); + } catch (...) { + HILOGE("Failed to restore the ext bundle"); + ptr->AppDone(BError(BError::Codes::EXT_INVAL_ARG).GetCode()); + } + ptr->DoClear(); + }; + + doBackupPool_.AddTask([dobackupTask]() { + try { + dobackupTask(); + } catch (...) 
{ + HILOGE("Failed to add task to thread pool"); + } + }); } void BackupExtExtension::RestoreBigFilesForSpecialCloneCloud(const ExtManageInfo &item) @@ -2057,20 +2062,6 @@ ErrCode BackupExtExtension::IncrementalOnBackup(bool isClearData) return ERR_OK; } -ErrCode BackupExtExtension::GetIncrementalBackupFileHandle(UniqueFdGroup& fdGroup) -{ - auto [fd, reportFd] = GetIncrementalBackupFileHandle(); - fdGroup.fd = fd.Release(); - fdGroup.reportFd = reportFd.Release(); - return BError(BError::Codes::OK).GetCode(); -} - -tuple BackupExtExtension::GetIncrementalBackupFileHandle() -{ - HITRACE_METER_NAME(HITRACE_TAG_FILEMANAGEMENT, __PRETTY_FUNCTION__); - return {UniqueFd(-1), UniqueFd(-1)}; -} - static void WriteFile(const string &filename, const vector &srcFiles) { fstream f; diff --git a/frameworks/native/backup_ext/src/sub_ext_extension.cpp b/frameworks/native/backup_ext/src/sub_ext_extension.cpp index cf9de6eb4..ba248db3c 100644 --- a/frameworks/native/backup_ext/src/sub_ext_extension.cpp +++ b/frameworks/native/backup_ext/src/sub_ext_extension.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +51,7 @@ #include "b_ohos/startup/backup_para.h" #include "b_radar/b_radar.h" #include "b_tarball/b_tarball_factory.h" +#include "b_utils/scan_file_singleton.h" #include "filemgmt_libhilog.h" #include "hitrace_meter.h" #include "sandbox_helper.h" @@ -1467,4 +1469,177 @@ void BackupExtExtension::RmBigFileReportForSpecialCloneCloud(const std::string & } reportHashSrcPathMap_.erase(iter); } + +void BackupExtExtension::CalculateDataSizeTask(const string &config) +{ + if (!StopExtTimer()) { + throw BError(BError::Codes::EXT_TIMER_ERROR, "Failed to stop extTimer"); + } + int64_t totalSize = 0; + TarMap bigFileInfo; + map smallFiles; + BJsonCachedEntity cachedEntity(config); + auto cache = cachedEntity.Structuralize(); + auto [err, includeSize, excludeSize] = CalculateDataSize(cache, totalSize, bigFileInfo, smallFiles); + ScanFileSingleton::GetInstance().SetIncludeSize(includeSize); + ScanFileSingleton::GetInstance().SetExcludeSize(excludeSize); + if (err != ERR_OK) { + throw BError(BError::Codes::EXT_INVAL_ARG, "Failed to mkdir"); + } + if (!RefreshDataSize(totalSize)) { + throw BError(BError::Codes::EXT_INVAL_ARG, "Failed to RefreshDataSize"); + } + bool isFwkStart; + StartFwkTimer(isFwkStart); + if (!isFwkStart) { + HILOGE("Do backup, start fwk timer fail."); + throw BError(BError::Codes::EXT_TIMER_ERROR, "Failed to start fwkTimer"); + } +} + +void BackupExtExtension::DoBackUpTask(const string &config) +{ + BJsonCachedEntity cachedEntity(config); + auto cache = cachedEntity.Structuralize(); + vector excludes = cache.GetExcludes(); + vector endExcludes = excludes; + PreDealExcludes(endExcludes); + + int ret = 0; + TarMap fileBackupedInfo; + while (!ScanFileSingleton::GetInstance().GetCompeletedFlag()) { + ScanFileSingleton::GetInstance().WaitForFiles(); + std::map incFiles = ScanFileSingleton::GetInstance().GetAllBigFiles(); + if (incFiles.empty()) { + continue; + } + map bigFiles = MatchFiles(incFiles, endExcludes); + TarMap bigFileInfo = convertFileToBigFiles(bigFiles); + ret = DoBackupBigFiles(bigFileInfo, fileBackupedInfo.size()); + fileBackupedInfo.insert(bigFileInfo.begin(), bigFileInfo.end()); + } + + map incSmallFiles = ScanFileSingleton::GetInstance().GetAllSmallFiles(); + map smallFiles = MatchFiles(incSmallFiles, endExcludes); + + std::map incFiles = ScanFileSingleton::GetInstance().GetAllBigFiles(); + map bigFiles = MatchFiles(incFiles, endExcludes); 
+ TarMap bigFileInfo = convertFileToBigFiles(bigFiles); + uint32_t includeSize = ScanFileSingleton::GetInstance().GetIncludeSize(); + uint32_t excludeSize = ScanFileSingleton::GetInstance().GetExcludeSize(); + + fileBackupedInfo.insert(bigFileInfo.begin(), bigFileInfo.end()); + ret = DoBackup(bigFileInfo, fileBackupedInfo, smallFiles, includeSize, excludeSize); + AppDone(ret); + HILOGI("backup app done %{public}d", ret); +} + +template +std::map BackupExtExtension::MatchFiles(map files, vector endExcludes) +{ + auto isMatch = [](const vector &s, const string &str) -> bool { + if (str.empty() || s.empty()) { + return false; + } + for (const string &item : s) { + if (fnmatch(item.data(), str.data(), FNM_LEADING_DIR) == 0) { + return true; + } + } + return false; + }; + + std::map excludesFiles; + for (const auto &item : files) { + if (!isMatch(endExcludes, item.first)) { + excludesFiles.emplace(item); + } + } + return excludesFiles; +} + +int BackupExtExtension::DoBackupBigFiles(TarMap &bigFileInfo, uint32_t backupedFileSize) +{ + HITRACE_METER_NAME(HITRACE_TAG_FILEMANAGEMENT, __PRETTY_FUNCTION__); + HILOGI("Start do backup big files, bigFileInfo size: %{public}zu", bigFileInfo.size()); + if (extension_ == nullptr) { + HILOGE("Failed to do backup big files, extension is nullptr."); + return EPERM; + } + if (extension_->GetExtensionAction() != BConstants::ExtensionAction::BACKUP) { + HILOGE("Failed to do backup big files, extension action is not back up."); + return EPERM; + } + + auto proxy = ServiceClient::GetInstance(); + if (proxy == nullptr) { + HILOGE("Failed to do backup big files, proxy is nullptr."); + return EPERM; + } + + auto res = BigFileReady(bigFileInfo, proxy, backupedFileSize); + HILOGI("HandleBackup finish, ret = %{public}d", res); + return res; +} + +TarMap BackupExtExtension::convertFileToBigFiles(std::map files) +{ + auto getStringHash = [](const TarMap &m, const string &str) -> string { + ostringstream strHex; + strHex << hex; + + hash strHash; + size_t szHash = strHash(str); + strHex << setfill('0') << setw(BConstants::BIG_FILE_NAME_SIZE) << szHash; + string name = strHex.str(); + for (int i = 0; m.find(name) != m.end(); ++i, strHex.str("")) { + szHash = strHash(str + to_string(i)); + strHex << setfill('0') << setw(BConstants::BIG_FILE_NAME_SIZE) << szHash; + name = strHex.str(); + } + return name; + }; + + TarMap bigFileInfo; + for (const auto& item : files) { + string md5Name = getStringHash(bigFileInfo, item.first); + if (!md5Name.empty()) { + bigFileInfo.emplace(md5Name, make_tuple(item.first, item.second, true)); + } + } + return bigFileInfo; +} + +void BackupExtExtension::PreDealExcludes(std::vector &excludes) +{ + size_t lenEx = excludes.size(); + int j = 0; + for (size_t i = 0; i < lenEx; ++i) { + if (!excludes[i].empty()) { + if (excludes[i].at(excludes[i].size() - 1) == BConstants::FILE_SEPARATOR_CHAR) { + excludes[i] += "*"; + } + if (excludes[i].find(BConstants::FILE_SEPARATOR_CHAR) != string::npos && + excludes[i].at(0) != BConstants::FILE_SEPARATOR_CHAR) { + excludes[i] = BConstants::FILE_SEPARATOR_CHAR + excludes[i]; + } + excludes[j++] = excludes[i]; + } + } + excludes.resize(j); +} + +ErrCode BackupExtExtension::GetIncrementalBackupFileHandle(UniqueFdGroup& fdGroup) +{ + auto [fd, reportFd] = GetIncrementalBackupFileHandle(); + fdGroup.fd = fd.Release(); + fdGroup.reportFd = reportFd.Release(); + return BError(BError::Codes::OK).GetCode(); +} + +tuple BackupExtExtension::GetIncrementalBackupFileHandle() +{ + 
HITRACE_METER_NAME(HITRACE_TAG_FILEMANAGEMENT, __PRETTY_FUNCTION__); + return {UniqueFd(-1), UniqueFd(-1)}; +} } // namespace OHOS::FileManagement::Backup diff --git a/tests/unittests/backup_utils/BUILD.gn b/tests/unittests/backup_utils/BUILD.gn index 36b24da70..4ef61325c 100644 --- a/tests/unittests/backup_utils/BUILD.gn +++ b/tests/unittests/backup_utils/BUILD.gn @@ -390,6 +390,38 @@ ohos_unittest("b_json_clear_data_test") { use_exceptions = true } +ohos_unittest("b_utils_test") { + branch_protector_ret = "pac_ret" + sanitize = { + integer_overflow = true + cfi = true + cfi_cross_dso = true + debug = false + } + + module_out_path = path_module_out_tests + + sources = [ + "b_utils\scan_file_singleton_test.cpp" + ] + + include_dirs = [ "${path_backup}/utils/src/b_utils" ] + + deps = [ + "${path_backup}/interfaces/innerkits/native:sandbox_helper_native", + "${path_backup}/tests/utils:backup_test_utils", + "${path_backup}/utils/:backup_utils", + ] + + external_deps = [ + "c_utils:utils", + "hilog:libhilog", + "jsoncpp:jsoncpp", + ] + + use_exceptions = true +} + group("backup_utils_test") { testonly = true @@ -405,5 +437,6 @@ group("backup_utils_test") { ":b_process_test", ":b_tarball_cmdline_test", ":b_tarball_factory_test", + ":b_utils_test", ] } diff --git a/tests/unittests/backup_utils/b_utils/scan_file_singleton_test.cpp b/tests/unittests/backup_utils/b_utils/scan_file_singleton_test.cpp new file mode 100644 index 000000000..67dfb8d8c --- /dev/null +++ b/tests/unittests/backup_utils/b_utils/scan_file_singleton_test.cpp @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2025-2026 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include + +#include +#include + +#include "b_utils/scan_file_singleton.h" +#include "test_manager.h" + +namespace OHOS::FileManagement::Backup { +using namespace std; + +class ScanFileSingletonTest : public testing::Test { +public: + static void SetUpTestCase(void) {}; + static void TearDownTestCase() {}; + void SetUp() {}; + void TearDown() {}; +}; + +/** + * @brief 测试ScanFileSingleton的单例特性 + * @tc.number: SUB_scan_file_singleton_GetInstance_0100 + * @tc.name: scan_file_singleton_GetInstance_0100 + * @tc.desc: 测试ScanFileSingleton是否为单例,即多次获取实例返回相同对象 + * @tc.size: SMALL + * @tc.type: FUNC + * @tc.level Level 1 + */ +HWTEST_F(ScanFileSingletonTest, scan_file_singleton_GetInstance_0100, testing::ext::TestSize.Level1) +{ + auto& instance1 = ScanFileSingleton::GetInstance(); + auto& instance2 = ScanFileSingleton::GetInstance(); + EXPECT_EQ(&instance1, &instance2) << "ScanFileSingleton should be a singleton, but it's not."; +} + +/** + * @brief 测试获取所有大文件 + * @tc.number: SUB_scan_file_singleton_GetAllBigfiles_0100 + * @tc.name: scan_file_singleton_GetAllBigfiles_0100 + * @tc.desc: 测试ScanFileSingleton的GetAllBigfiles方法是否能正确获取所有文件信息 + * @tc.size: MEDIUM + * @tc.type: FUNC + * @tc.level Level 1 + */ +HWTEST_F(ScanFileSingletonTest, scan_file_singleton_GetAllBigfiles_0100, testing::ext::TestSize.Level1) +{ + auto& instance = ScanFileSingleton::GetInstance(); + struct stat fileStat = {}; + // 假设文件大小为2048字节 + fileStat.st_size = 2048; + instance.AddBigFile("/path/to/another_file", fileStat); + + auto allBigFiles = instance.GetAllBigFiles(); + + EXPECT_EQ(allBigFiles.size(), 1) << "There should be one big file in queue."; + EXPECT_EQ(allBigFiles["/path/to/another_file"].st_size, 2048) << "the file size should be 2048 bytes."; +} + +/** + * @brief 测试设置和获取完成标志 + * @tc.number: SUB_scan_file_singleton_SetGetCompeletedFlag_0100 + * @tc.name: scan_file_singleton_SetGetCompeletedFlag_0100 + * @tc.desc: 测试ScanFileSingleton的SetCompeletedFlag和GetCompeletedFlag方法是否能正确设置和获取完成标志 + * @tc.size: MEDIUM + * @tc.type: FUNC + * @tc.level Level 1 + */ +HWTEST_F(ScanFileSingletonTest, scan_file_singleton_SetGetCompeletedFlag_0100, testing::ext::TestSize.Level1) +{ + auto& instance = ScanFileSingleton::GetInstance(); + instance.SetCompeletedFlag(true); + EXPECT_TRUE(instance.GetCompeletedFlag()) << "The completed flag should be true."; + + instance.SetCompeletedFlag(false); + EXPECT_FALSE(instance.GetCompeletedFlag()) << "The completed flag should be false."; +} + +/** + * @brief 测试添加和获取小文件信息 + * @tc.number: SUB_scan_file_singleton_GetAllSmallFiles_0100 + * @tc.name: scan_file_singleton_GetAllSmallFiles_0100 + * @tc.desc: 测试ScanFileSingleton的GetAllBigfiles方法是否能正确添加和获取小文件信息 + * @tc.size: MEDIUM + * @tc.type: FUNC + * @tc.level Level 1 + */ +HWTEST_F(ScanFileSingletonTest, scan_file_singleton_GetAllSmallFiles_0100, testing::ext::TestSize.Level1) +{ + auto& instance = ScanFileSingleton::GetInstance(); + + instance.AddSmallFile("/path/to/small_file.txt", 512); + auto allSmallFiels = instance.GetAllSmallFiles(); + + EXPECT_EQ(allSmallFiels.size(), 1) << "There should be one small file in queue."; + EXPECT_EQ(allSmallFiels["/path/to/small_file.txt"], 512) << "The file size should be 512 bytes."; +} + +/** + * @brief 测试设置和获取includeSize + * @tc.number: SUB_scan_file_singleton_SetGetIncludeSize_0100 + * @tc.name: scan_file_singleton_SetGetIncludeSize_0100 + * @tc.desc: 测试ScanFileSingleton的SetIncludeSize和GetIncludeSize方法是否能正确设置和获取includeSize + * @tc.size: MEDIUM + * @tc.type: FUNC + * @tc.level Level 1 + 
*/ +HWTEST_F(ScanFileSingletonTest, scan_file_singleton_SetGetIncludeSize_0100, testing::ext::TestSize.Level1) +{ + auto& instance = ScanFileSingleton::GetInstance(); + + instance.SetIncludeSize(100); + EXPECT_EQ(instance.GetIncludeSize(), 100) << "The include size should be 100."; +} + +/** + * @brief 测试设置和获取excludeSize + * @tc.number: SUB_scan_file_singleton_SetGetExcludeSize_0100 + * @tc.name: scan_file_singleton_SetGetExcludeSize_0100 + * @tc.desc: 测试ScanFileSingleton的SetExcludeSize和GetExcludeSize方法是否能正确设置和获取excludeSize + * @tc.size: MEDIUM + * @tc.type: FUNC + * @tc.level Level 1 + */ +HWTEST_F(ScanFileSingletonTest, scan_file_singleton_SetGetExcludeSize_0100, testing::ext::TestSize.Level1) +{ + auto& instance = ScanFileSingleton::GetInstance(); + + instance.SetExcludeSize(100); + EXPECT_EQ(instance.GetExcludeSize(), 100) << "The exclude size should be 100."; +} +} // namespace OHOS::FileManagement::Backup \ No newline at end of file diff --git a/utils/BUILD.gn b/utils/BUILD.gn index de06ec613..90f31d3e6 100644 --- a/utils/BUILD.gn +++ b/utils/BUILD.gn @@ -83,6 +83,7 @@ ohos_shared_library("backup_utils") { "src/b_tarball/b_tarball_cmdline.cpp", "src/b_tarball/b_tarball_factory.cpp", "src/b_utils/b_time.cpp", + "src/b_utils/scan_file_singleton.cpp", ] configs = [ diff --git a/utils/include/b_utils/scan_file_singleton.h b/utils/include/b_utils/scan_file_singleton.h new file mode 100644 index 000000000..6fde188d8 --- /dev/null +++ b/utils/include/b_utils/scan_file_singleton.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2025-2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef OHOS_FILEMGMT_BACKUP_SCAN_FILE_SINGLETON_H
+#define OHOS_FILEMGMT_BACKUP_SCAN_FILE_SINGLETON_H
+
+#include
+#include
+#include
+#include
+#include
+
+namespace OHOS::FileManagement::Backup {
+class ScanFileSingleton {
+public:
+    static ScanFileSingleton &GetInstance();
+
+    void AddBigFile(const std::string& key, const struct stat& value);
+
+    void AddSmallFile(const std::string& key, size_t value);
+
+    std::map<std::string, struct stat> GetAllBigFiles();
+
+    bool GetCompeletedFlag();
+
+    void SetCompeletedFlag(bool value);
+
+    std::map<std::string, size_t> GetAllSmallFiles();
+
+    void SetIncludeSize(uint32_t includeSize);
+
+    void SetExcludeSize(uint32_t excludeSize);
+
+    uint32_t GetIncludeSize();
+
+    uint32_t GetExcludeSize();
+
+    // Conditional wait: block until files are added or the scan has completed
+    void WaitForFiles();
+private:
+    // Private constructor to prevent external instantiation
+    ScanFileSingleton() {}
+    ~ScanFileSingleton();
+
+    std::queue<std::pair<std::string, struct stat>> bigFileQueue_;
+    std::map<std::string, size_t> smallFiles_;
+    std::mutex mutexLock_;
+    std::condition_variable waitForFilesAddCv_;
+    bool isCalculateCompeleted_ = false;
+    uint32_t includeSize_ = 0;
+    uint32_t excludeSize_ = 0;
+};
+} // namespace OHOS::FileManagement::Backup
+#endif // OHOS_FILEMGMT_BACKUP_SCAN_FILE_SINGLETON_H
\ No newline at end of file
diff --git a/utils/src/b_filesystem/b_dir.cpp b/utils/src/b_filesystem/b_dir.cpp
index 174767445..185d87356 100644
--- a/utils/src/b_filesystem/b_dir.cpp
+++ b/utils/src/b_filesystem/b_dir.cpp
@@ -35,6 +35,7 @@
 #include "errors.h"
 #include "filemgmt_libhilog.h"
 #include "sandbox_helper.h"
+#include "b_utils/scan_file_singleton.h"

 namespace OHOS::FileManagement::Backup {
 using namespace std;
@@ -93,6 +94,12 @@ static uint32_t CheckOverLongPath(const string &path)
     return len;
 }

+static void InsertSmallFiles(std::map<std::string, size_t> &smallFiles, std::string fileName, size_t size)
+{
+    ScanFileSingleton::GetInstance().AddSmallFile(fileName, size);
+    smallFiles.emplace(make_pair(fileName, size));
+}
+
 static tuple, map> GetDirFilesDetail(const string &path,
                                      bool recursion,
                                      off_t size = -1)
@@ -105,7 +112,7 @@ static tuple, map> GetDirFiles
         if (path.at(path.size()-1) != BConstants::FILE_SEPARATOR_CHAR) {
             newPath += BConstants::FILE_SEPARATOR_CHAR;
         }
-        smallFiles.emplace(make_pair(newPath, 0));
+        InsertSmallFiles(smallFiles, newPath, 0);
         return {ERR_OK, files, smallFiles};
     }

@@ -126,10 +133,10 @@
                 continue;
             }
             if (sta.st_size <= size) {
-                smallFiles.emplace(make_pair(fileName, sta.st_size));
+                InsertSmallFiles(smallFiles, fileName, sta.st_size);
                 continue;
             }
-
+            ScanFileSingleton::GetInstance().AddBigFile(fileName, sta);
             files.try_emplace(fileName, sta);
             continue;
         } else if (ptr->d_type != DT_DIR) {
diff --git a/utils/src/b_utils/scan_file_singleton.cpp b/utils/src/b_utils/scan_file_singleton.cpp
new file mode 100644
index 000000000..86fd425ca
--- /dev/null
+++ b/utils/src/b_utils/scan_file_singleton.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2025-2025 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "b_utils/scan_file_singleton.h"
+#include
+
+namespace OHOS::FileManagement::Backup {
+
+ScanFileSingleton& ScanFileSingleton::GetInstance()
+{
+    static ScanFileSingleton instance;
+    return instance;
+}
+
+ScanFileSingleton::~ScanFileSingleton()
+{
+    isCalculateCompeleted_ = false;
+}
+
+void ScanFileSingleton::AddBigFile(const std::string& key, const struct stat& value)
+{
+    std::lock_guard lock(mutexLock_);
+    bigFileQueue_.push({key, value});
+    waitForFilesAddCv_.notify_all();
+}
+
+void ScanFileSingleton::AddSmallFile(const std::string& key, size_t value)
+{
+    std::lock_guard lock(mutexLock_);
+    smallFiles_[key] = value;
+}
+
+std::map<std::string, struct stat> ScanFileSingleton::GetAllBigFiles()
+{
+    std::lock_guard lock(mutexLock_);
+    std::map<std::string, struct stat> fileMap;
+    while (!bigFileQueue_.empty()) {
+        fileMap[bigFileQueue_.front().first] = bigFileQueue_.front().second;
+        bigFileQueue_.pop();
+    }
+    return fileMap;
+}
+
+bool ScanFileSingleton::GetCompeletedFlag()
+{
+    std::lock_guard lock(mutexLock_);
+    return isCalculateCompeleted_;
+}
+
+void ScanFileSingleton::SetCompeletedFlag(bool value)
+{
+    std::lock_guard lock(mutexLock_);
+    isCalculateCompeleted_ = value;
+    if (value) {
+        waitForFilesAddCv_.notify_all();
+    }
+}
+
+std::map<std::string, size_t> ScanFileSingleton::GetAllSmallFiles()
+{
+    std::lock_guard lock(mutexLock_);
+    return smallFiles_;
+}
+
+void ScanFileSingleton::SetIncludeSize(uint32_t includeSize)
+{
+    std::lock_guard lock(mutexLock_);
+    includeSize_ = includeSize;
+}
+
+void ScanFileSingleton::SetExcludeSize(uint32_t excludeSize)
+{
+    std::lock_guard lock(mutexLock_);
+    excludeSize_ = excludeSize;
+}
+
+uint32_t ScanFileSingleton::GetIncludeSize()
+{
+    std::lock_guard lock(mutexLock_);
+    return includeSize_;
+}
+
+uint32_t ScanFileSingleton::GetExcludeSize()
+{
+    std::lock_guard lock(mutexLock_);
+    return excludeSize_;
+}
+
+void ScanFileSingleton::WaitForFiles()
+{
+    HILOGI("calculate is uncompleted, need to wait");
+    std::unique_lock lock(mutexLock_);
+    waitForFilesAddCv_.wait(lock, [this] { return !bigFileQueue_.empty() || isCalculateCompeleted_; });
+}
+
+} // namespace OHOS::FileManagement::Backup
\ No newline at end of file
--
Gitee
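
The core of this change is a producer/consumer handoff: the scan task (CalculateDataSizeTask via GetDirFilesDetail) pushes big-file entries into ScanFileSingleton and finally sets the completed flag, while DoBackUpTask blocks in WaitForFiles and drains batches with GetAllBigFiles until the flag is set. Below is a minimal standalone sketch of that pattern, assuming only the C++ standard library; FileScanQueue and the simulated scan/backup threads are illustrative stand-ins, not the framework's own classes or APIs.

// Minimal model of the scan/backup handoff introduced by this patch.
#include <condition_variable>
#include <iostream>
#include <map>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <utility>

class FileScanQueue {                       // stand-in for ScanFileSingleton
public:
    void AddBigFile(const std::string &path, size_t size)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.push({path, size});
        cv_.notify_all();                   // wake the backup thread for each new file
    }

    void SetCompletedFlag()
    {
        std::lock_guard<std::mutex> lock(mutex_);
        completed_ = true;
        cv_.notify_all();                   // wake the backup thread so it can exit
    }

    bool GetCompletedFlag()
    {
        std::lock_guard<std::mutex> lock(mutex_);
        return completed_;
    }

    // Block until at least one file is queued or scanning has finished,
    // then hand back everything queued so far as one batch.
    std::map<std::string, size_t> WaitAndTakeBatch()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty() || completed_; });
        std::map<std::string, size_t> batch;
        while (!queue_.empty()) {
            batch.emplace(queue_.front());
            queue_.pop();
        }
        return batch;
    }

private:
    std::queue<std::pair<std::string, size_t>> queue_;
    std::mutex mutex_;
    std::condition_variable cv_;
    bool completed_ = false;
};

int main()
{
    FileScanQueue scanQueue;

    // "Scan" thread: plays the role of CalculateDataSizeTask/GetDirFilesDetail.
    std::thread scanner([&scanQueue] {
        for (int i = 0; i < 5; ++i) {
            scanQueue.AddBigFile("/data/app/big_" + std::to_string(i), 4096u * (i + 1));
        }
        scanQueue.SetCompletedFlag();       // scanning done, no more files will arrive
    });

    // "Backup" thread: plays the role of DoBackUpTask, uploading batches as they appear.
    size_t backedUp = 0;
    std::thread backup([&scanQueue, &backedUp] {
        while (true) {
            auto batch = scanQueue.WaitAndTakeBatch();
            backedUp += batch.size();       // the real code calls DoBackupBigFiles() here
            if (batch.empty() && scanQueue.GetCompletedFlag()) {
                break;                      // queue drained and scan finished
            }
        }
    });

    scanner.join();
    backup.join();
    std::cout << "backed up " << backedUp << " big files" << std::endl;
    return 0;
}

The design point this models: the backup side no longer waits for the full scan to finish before sending big files, which is why BigFileReady now takes a backupedFileSize offset and DoBackup receives the set of files already transferred.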