Merge "Fix performance-for-range-copy warnings"

This commit is contained in:
Treehugger Robot 2018-12-11 21:01:25 +00:00 committed by Gerrit Code Review
commit db2e4888f8
3 changed files with 4 additions and 4 deletions

View File

@@ -93,7 +93,7 @@ const WordProperty *OffdeviceIntermediateDict::getWordProperty(
     const OffdeviceIntermediateDictPtNodeArray *ptNodeArray = &mRootPtNodeArray;
     for (size_t i = 0; i < codePoints.size();) {
         bool foundNext = false;
-        for (const auto ptNode : ptNodeArray->getPtNodeList()) {
+        for (const auto& ptNode : ptNodeArray->getPtNodeList()) {
             const CodePointArrayView ptNodeCodePoints = ptNode->getPtNodeCodePoints();
             if (codePoints[i] < ptNodeCodePoints[0]) {
                 continue;

View File

@@ -141,7 +141,7 @@ void Ver4PatriciaTriePolicy::iterateNgramEntries(const WordIdArrayView prevWordI
         }
         const auto languageModelDictContent = mBuffers->getLanguageModelDictContent();
         for (size_t i = 1; i <= prevWordIds.size(); ++i) {
-            for (const auto entry : languageModelDictContent->getProbabilityEntries(
+            for (const auto& entry : languageModelDictContent->getProbabilityEntries(
                     prevWordIds.limit(i))) {
                 const ProbabilityEntry &probabilityEntry = entry.getProbabilityEntry();
                 if (!probabilityEntry.isValid()) {
@@ -516,7 +516,7 @@ const WordProperty Ver4PatriciaTriePolicy::getWordProperty(
     int ngramPrevWordsCodePoints[MAX_PREV_WORD_COUNT_FOR_N_GRAM][MAX_WORD_LENGTH];
     int ngramPrevWordsCodePointCount[MAX_PREV_WORD_COUNT_FOR_N_GRAM];
     bool ngramPrevWordIsBeginningOfSentense[MAX_PREV_WORD_COUNT_FOR_N_GRAM];
-    for (const auto entry : languageModelDictContent->exportAllNgramEntriesRelatedToWord(
+    for (const auto& entry : languageModelDictContent->exportAllNgramEntriesRelatedToWord(
             mHeaderPolicy, wordId)) {
         const int codePointCount = getCodePointsAndReturnCodePointCount(entry.getTargetWordId(),
                 MAX_WORD_LENGTH, ngramTargetCodePoints);

View File

@@ -80,7 +80,7 @@ TEST(LanguageModelDictContentTest, TestIterateProbabilityEntry) {
         languageModelDictContent.setProbabilityEntry(wordId, &originalEntry);
     }
     std::unordered_set<int> wordIdSet(std::begin(wordIds), std::end(wordIds));
-    for (const auto entry : languageModelDictContent.getProbabilityEntries(WordIdArrayView())) {
+    for (const auto& entry : languageModelDictContent.getProbabilityEntries(WordIdArrayView())) {
         EXPECT_EQ(originalEntry.getFlags(), entry.getProbabilityEntry().getFlags());
         EXPECT_EQ(originalEntry.getProbability(), entry.getProbabilityEntry().getProbability());
         wordIdSet.erase(entry.getWordId());