Permalink
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
codeql-action/lib/analyze.js
Go to file. This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
This commit adds a `packs` option to the codeql-config.yml file. Users can specify a list of ql packs to include in the analysis. For a single language analysis, the packs property looks like this: ```yaml packs: - pack-scope/pack-name1@1.2.3 - pack-scope/pack-name2 # no explicit version means download the latest ``` For multi-language analysis, you must key the packs block by language: ```yaml packs: cpp: - pack-scope/pack-name1@1.2.3 - pack-scope/pack-name2 java: - pack-scope/pack-name3@1.2.3 - pack-scope/pack-name4 ``` This implementation adds a new analysis run (alongside custom and builtin runs). The unit tests indicate that the correct commands are being run, but I have not actually tried this with a real CLI. Also, convert `instanceof Array` to `Array.isArray` since that is slightly better in some situations. See: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/isArray#instanceof_vs_isarray
221 lines (221 sloc)
10.9 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"use strict"; | |
var __importStar = (this && this.__importStar) || function (mod) { | |
if (mod && mod.__esModule) return mod; | |
var result = {}; | |
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; | |
result["default"] = mod; | |
return result; | |
}; | |
Object.defineProperty(exports, "__esModule", { value: true }); | |
const fs = __importStar(require("fs")); | |
const path = __importStar(require("path")); | |
const toolrunner = __importStar(require("@actions/exec/lib/toolrunner")); | |
const analysisPaths = __importStar(require("./analysis-paths")); | |
const codeql_1 = require("./codeql"); | |
const count_loc_1 = require("./count-loc"); | |
const languages_1 = require("./languages"); | |
const sharedEnv = __importStar(require("./shared-environment")); | |
const upload_lib_1 = require("./upload-lib"); | |
const util = __importStar(require("./util")); | |
// Error raised when running the analysis queries fails part-way through.
// Carries the per-language status report collected so far so that callers
// can still surface partial timing/status data alongside the failure.
class CodeQLAnalysisError extends Error {
    constructor(queriesStatusReport, message) {
        super(message);
        this.queriesStatusReport = queriesStatusReport;
        this.name = "CodeQLAnalysisError";
    }
}
exports.CodeQLAnalysisError = CodeQLAnalysisError;
// Configure the Python extractor's environment variables by querying the
// Python interpreter named in CODEQL_PYTHON. No-op when that variable is
// unset or empty (no dependencies were installed in that case).
async function setupPythonExtractor(logger) {
    const codeqlPython = process.env["CODEQL_PYTHON"];
    if (codeqlPython === undefined || codeqlPython.length === 0) {
        // If CODEQL_PYTHON is not set, no dependencies were installed, so we don't need to do anything
        return;
    }
    // Run a short Python snippet and capture everything it prints to stdout.
    const runSnippet = async (script) => {
        let captured = "";
        const options = {
            listeners: {
                stdout: (data) => {
                    captured += data.toString();
                },
            },
        };
        await new toolrunner.ToolRunner(codeqlPython, ["-c", script], options).exec();
        return captured;
    };
    // Directory holding the installed dependencies: the parent of pip's package dir.
    const importPath = await runSnippet("import os; import pip; print(os.path.dirname(os.path.dirname(pip.__file__)))");
    logger.info(`Setting LGTM_INDEX_IMPORT_PATH=${importPath}`);
    process.env["LGTM_INDEX_IMPORT_PATH"] = importPath;
    // Major Python version (2 or 3) so the extractor picks the right setup.
    const pythonVersion = await runSnippet("import sys; print(sys.version_info[0])");
    logger.info(`Setting LGTM_PYTHON_SETUP_VERSION=${pythonVersion}`);
    process.env["LGTM_PYTHON_SETUP_VERSION"] = pythonVersion;
}
// Run extraction for every "scanned" (interpreted) language in the config.
// Compiled languages are extracted via the build tracer instead, so they are
// skipped here.
async function createdDBForScannedLanguages(config, logger) {
    // The LGTM_INDEX_* env vars must be in place before any scanned language
    // is extracted, so install the include/exclude paths first.
    analysisPaths.includeAndExcludeAnalysisPaths(config);
    const codeql = codeql_1.getCodeQL(config.codeQLCmd);
    const scannedLanguages = config.languages.filter((lang) => languages_1.isScannedLanguage(lang));
    for (const language of scannedLanguages) {
        logger.startGroup(`Extracting ${language}`);
        // Python needs extra environment setup when dependencies were installed.
        if (language === languages_1.Language.python) {
            await setupPythonExtractor(logger);
        }
        await codeql.extractScannedLanguage(util.getCodeQLDatabasePath(config, language), language);
        logger.endGroup();
    }
}
// Finish database creation for every configured language: first extract the
// scanned languages, then finalize each per-language database so it is ready
// to be queried.
async function finalizeDatabaseCreation(config, threadsFlag, logger) {
    await createdDBForScannedLanguages(config, logger);
    const codeql = codeql_1.getCodeQL(config.codeQLCmd);
    for (const language of config.languages) {
        logger.startGroup(`Finalizing ${language}`);
        const databasePath = util.getCodeQLDatabasePath(config, language);
        await codeql.finalizeDatabase(databasePath, threadsFlag);
        logger.endGroup();
    }
}
// Runs queries and creates sarif files in the given folder.
// For each configured language this runs up to three query groups —
// "builtin", per-config "custom" suites, and (new) QL "packs" — and records
// per-language timing in the returned status report. Custom and pack results
// are combined into a single `<language>-custom.sarif` file; builtin results
// go straight into `sarifFolder`. Throws CodeQLAnalysisError (carrying the
// partial status report) if analysis of any language fails.
async function runQueries(sarifFolder, memoryFlag, addSnippetsFlag, threadsFlag, automationDetailsId, config, logger) {
    const statusReport = {};
    // count the number of lines in the background
    const locPromise = count_loc_1.countLoc(path.resolve(),
    // config.paths specifies external directories. the current
    // directory is included in the analysis by default. Replicate
    // that here.
    config.paths, config.pathsIgnore, config.languages, logger);
    for (const language of config.languages) {
        logger.startGroup(`Analyzing ${language}`);
        const queries = config.queries[language];
        // NOTE(review): assumes config.packs is always an object (possibly
        // empty) — TODO confirm the config loader guarantees this.
        const packsWithVersion = config.packs[language] || [];
        if (queries === undefined ||
            (queries.builtin.length === 0 && queries.custom.length === 0)) {
            throw new Error(`Unable to analyse ${language} as no queries were selected for this language`);
        }
        try {
            let analysisSummaryBuiltIn = "";
            const customAnalysisSummaries = [];
            // Group 1: the built-in query suite, written directly to sarifFolder.
            if (queries["builtin"].length > 0) {
                const startTimeBuiltIn = new Date().getTime();
                const { sarifFile, stdout } = await runQueryGroup(language, "builtin", createQuerySuiteContents(queries["builtin"]), sarifFolder, undefined);
                analysisSummaryBuiltIn = stdout;
                await injectLinesOfCode(sarifFile, language, locPromise);
                statusReport[`analyze_builtin_queries_${language}_duration_ms`] =
                    new Date().getTime() - startTimeBuiltIn;
            }
            // Groups 2 and 3 (custom suites and packs) write to the temp dir
            // first; their SARIF files are merged below.
            const startTimeCustom = new Date().getTime();
            const temporarySarifDir = config.tempDir;
            const temporarySarifFiles = [];
            // Group 2: one run per custom query suite, each with its own search path.
            for (let i = 0; i < queries["custom"].length; ++i) {
                if (queries["custom"][i].queries.length > 0) {
                    const { sarifFile, stdout } = await runQueryGroup(language, `custom-${i}`, createQuerySuiteContents(queries["custom"][i].queries), temporarySarifDir, queries["custom"][i].searchPath);
                    customAnalysisSummaries.push(stdout);
                    temporarySarifFiles.push(sarifFile);
                }
            }
            // Group 3: QL packs selected in the config; reported alongside the
            // custom suites and merged into the same combined SARIF file.
            if (packsWithVersion.length > 0) {
                const { sarifFile, stdout } = await runQueryGroup(language, "packs", createPackSuiteContents(packsWithVersion), temporarySarifDir, undefined);
                customAnalysisSummaries.push(stdout);
                temporarySarifFiles.push(sarifFile);
            }
            // Merge all custom/pack SARIF output into one file and time the
            // whole custom phase (duration covers groups 2 and 3 together).
            if (temporarySarifFiles.length > 0) {
                const sarifFile = path.join(sarifFolder, `${language}-custom.sarif`);
                fs.writeFileSync(sarifFile, upload_lib_1.combineSarifFiles(temporarySarifFiles));
                await injectLinesOfCode(sarifFile, language, locPromise);
                statusReport[`analyze_custom_queries_${language}_duration_ms`] =
                    new Date().getTime() - startTimeCustom;
            }
            logger.endGroup();
            // Print the LoC baseline and the summary results from database analyze for the standard
            // query suite and (if appropriate) each custom query suite.
            logger.startGroup(`Analysis summary for ${language}`);
            printLinesOfCodeSummary(logger, language, await locPromise);
            logger.info(analysisSummaryBuiltIn);
            for (const [i, customSummary] of customAnalysisSummaries.entries()) {
                if (customSummary.trim() === "") {
                    continue;
                }
                const description = customAnalysisSummaries.length === 1
                    ? "custom queries"
                    : `custom query suite ${i + 1}/${customAnalysisSummaries.length}`;
                logger.info(`Analysis summary for ${description}:`);
                logger.info("");
                logger.info(customSummary);
                logger.info("");
            }
            logger.endGroup();
        }
        catch (e) {
            // Record which language failed, then rethrow with the partial
            // status report attached so the caller can still upload it.
            logger.info(e);
            statusReport.analyze_failure_language = language;
            throw new CodeQLAnalysisError(statusReport, `Error running analysis for ${language}: ${e}`);
        }
    }
    return statusReport;
    // Runs `database analyze` for one query group (builtin / custom-N / packs).
    // Closes over config and the various flags from the enclosing function.
    // Returns the SARIF file path and the CLI's stdout (the analysis summary).
    async function runQueryGroup(language, type, querySuiteContents, destinationFolder, searchPath) {
        const databasePath = util.getCodeQLDatabasePath(config, language);
        // Pass the queries to codeql using a file instead of using the command
        // line to avoid command line length restrictions, particularly on windows.
        const querySuitePath = `${databasePath}-queries-${type}.qls`;
        fs.writeFileSync(querySuitePath, querySuiteContents);
        logger.debug(`Query suite file for ${language}-${type}...\n${querySuiteContents}`);
        const sarifFile = path.join(destinationFolder, `${language}-${type}.sarif`);
        const codeql = codeql_1.getCodeQL(config.codeQLCmd);
        const databaseAnalyzeStdout = await codeql.databaseAnalyze(databasePath, sarifFile, searchPath, querySuitePath, memoryFlag, addSnippetsFlag, threadsFlag, automationDetailsId);
        logger.debug(`SARIF results for database ${language} created at "${sarifFile}"`);
        return { sarifFile, stdout: databaseAnalyzeStdout };
    }
}
exports.runQueries = runQueries;
// Render a list of query paths as query-suite (.qls) file contents,
// one `- query:` entry per line.
function createQuerySuiteContents(queries) {
    const entries = [];
    for (const query of queries) {
        entries.push(`- query: ${query}`);
    }
    return entries.join("\n");
}
// Render the selected QL packs as query-suite (.qls) file contents,
// one `- qlpack:` entry per pack.
function createPackSuiteContents(packsWithVersion) {
    const entries = packsWithVersion.map((pack) => packWithVersionToQuerySuiteEntry(pack));
    return entries.join("\n");
}
// One suite entry for a single pack; a pinned version (when present)
// becomes an indented `version:` line under the pack name.
function packWithVersionToQuerySuiteEntry(pack) {
    const lines = [`- qlpack: ${pack.packName}`];
    if (pack.version) {
        lines.push(` version: ${pack.version}`);
    }
    return lines.join("\n");
}
// Entry point for the analyze step: finalizes database creation for every
// configured language, runs the selected queries, and returns the collected
// per-language status report.
async function runAnalyze(outputDir, memoryFlag, addSnippetsFlag, threadsFlag, automationDetailsId, config, logger) {
    // Delete the tracer config env var to avoid tracing ourselves
    delete process.env[sharedEnv.ODASA_TRACER_CONFIGURATION];
    fs.mkdirSync(outputDir, { recursive: true });
    logger.info("Finalizing database creation");
    await finalizeDatabaseCreation(config, threadsFlag, logger);
    logger.info("Analyzing database");
    // Return a shallow copy so callers cannot alias our internal report.
    return {
        ...(await runQueries(outputDir, memoryFlag, addSnippetsFlag, threadsFlag, automationDetailsId, config, logger)),
    };
}
exports.runAnalyze = runAnalyze;
// Rewrite a SARIF file in place, adding the lines-of-code baseline for the
// given language to any pre-existing `<prefix>/summary/lines-of-code` metric
// result. Does nothing when no line count is available for the language.
async function injectLinesOfCode(sarifFile, language, locPromise) {
    const lineCounts = await locPromise;
    const idPrefix = count_loc_1.getIdPrefix(language);
    if (language in lineCounts) {
        const sarif = JSON.parse(fs.readFileSync(sarifFile, "utf8"));
        if (Array.isArray(sarif.runs)) {
            const ruleId = `${idPrefix}/summary/lines-of-code`;
            for (const run of sarif.runs) {
                if (!run.properties) {
                    run.properties = {};
                }
                if (!run.properties.metricResults) {
                    run.properties.metricResults = [];
                }
                // The rule id can live either directly on the result or
                // nested under its `rule` object.
                const match = run.properties.metricResults.find((r) => r.ruleId === ruleId || (r.rule != null && r.rule.id === ruleId));
                // Only add the baseline value if the rule already exists.
                if (match) {
                    match.baseline = lineCounts[language];
                }
            }
        }
        fs.writeFileSync(sarifFile, JSON.stringify(sarif));
    }
}
// Log the lines-of-code baseline for a language, when one was counted;
// silently does nothing otherwise.
function printLinesOfCodeSummary(logger, language, lineCounts) {
    if (!(language in lineCounts)) {
        return;
    }
    logger.info(`Counted ${lineCounts[language]} lines of code for ${language} as a baseline.`);
}
//# sourceMappingURL=analyze.js.map |