Bug 1738598 - sunset Coverity in Firefox.

Differential Revision: https://phabricator.services.mozilla.com/D129779
Andi-Bogdan Postelnicu 2021-10-31 17:18:02 +00:00
parent 050541241a
commit 80fddabd67
10 changed files with 0 additions and 1265 deletions

View file

@@ -145,16 +145,6 @@ jobs:
mozilla-esr91: [{hour: 10, minute: 0}]
elm: [{hour: 10, minute: 0}]
- name: coverity-tree-analysis
job:
type: decision-task
treeherder-symbol: CoverityTA
target-tasks-method: coverity_static_analysis_full
run-on-projects:
- mozilla-central
when:
- {hour: 10, minute: 0}
- name: linux64-clang-trunk-perf
job:
type: decision-task

View file

@@ -38,11 +38,6 @@ In this document, we try to list these all tools.
- `bug 712350 <https://bugzilla.mozilla.org/show_bug.cgi?id=712350>`__
-
- https://clang-analyzer.llvm.org/
* - Coverity
-
- `bug 1230156 <https://bugzilla.mozilla.org/show_bug.cgi?id=1230156>`__
-
-
* - cpp virtual final
-
-
@@ -166,4 +161,3 @@ In this document, we try to list these all tools.
-
-
- https://github.com/adrienverge/yamllint

View file

@@ -31,9 +31,6 @@ integrating this process with Phabricator and mach. A list of some
checkers that are used during automated scan can be found
`here <https://searchfox.org/mozilla-central/source/tools/clang-tidy/config.yaml>`__.
We are also running Coverity at review phase and a few times per day
(when autoland is merged into mozilla-central).
Static analysis at review phase
-------------------------------
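The review-phase workflow described in the documentation above is driven through mach. As a rough illustration (not taken from this commit), the remaining clang-tidy based check can be invoked locally along these lines, assuming a mozilla-central checkout and that the `static-analysis check` mach sub-command keeps its current interface; the helper name below is hypothetical:

    # Minimal sketch: run the in-tree clang-tidy based analysis on a few files
    # from a mozilla-central checkout. The helper name is hypothetical; only
    # `./mach static-analysis check` is assumed to exist.
    import subprocess
    import sys

    def run_review_phase_analysis(topsrcdir, files):
        # Stream mach's output and propagate its exit code.
        cmd = ["./mach", "static-analysis", "check"] + list(files)
        return subprocess.call(cmd, cwd=topsrcdir)

    if __name__ == "__main__":
        sys.exit(run_review_phase_analysis(".", sys.argv[1:]))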

View file

@@ -4,7 +4,6 @@
from __future__ import absolute_import, print_function, unicode_literals
import concurrent.futures
import io
import logging
import json
import multiprocessing
@@ -16,7 +15,6 @@ import re
import sys
import subprocess
import shutil
import tarfile
import tempfile
import xml.etree.ElementTree as ET
import yaml
@@ -184,11 +182,6 @@ _format_include_extensions = (".cpp", ".c", ".cc", ".h", ".m", ".mm")
# File contaning all paths to exclude from formatting
_format_ignore_file = ".clang-format-ignore"
# List of file extension to consider (should start with dot)
_check_syntax_include_extensions = (".cpp", ".c", ".cc", ".cxx")
_cov_config = None
# (TOOLS) Function return codes
TOOLS_SUCCESS = 0
TOOLS_FAILED_DOWNLOAD = 1
@@ -412,591 +405,13 @@ def check(
if output is not None:
output_manager.write(output, format)
if rc != 0:
return rc
# if we are building firefox for android it might be nice to
# also analyze the java code base
if command_context.substs["MOZ_BUILD_APP"] == "mobile/android":
rc = check_java(command_context, source, jobs, strip, verbose, skip_export=True)
return rc
@StaticAnalysisSubCommand(
"static-analysis",
"check-coverity",
"Run coverity static-analysis tool on the given files. "
"Can only be run by automation! "
"It's result is stored as an json file on the artifacts server.",
)
@CommandArgument(
"source",
nargs="*",
default=[],
help="Source files to be analyzed by Coverity Static Analysis Tool. "
"This is ran only in automation.",
)
@CommandArgument(
"--output",
"-o",
default=None,
help="Write coverity output translated to json output in a file",
)
@CommandArgument(
"--coverity_output_path",
"-co",
default=None,
help="Path where to write coverity results as cov-results.json. "
"If no path is specified the default path from the coverity working "
"directory, ~./mozbuild/coverity is used.",
)
@CommandArgument(
"--outgoing",
default=False,
action="store_true",
help="Run coverity on outgoing files from mercurial or git repository",
)
@CommandArgument(
"--full-build",
default=False,
action="store_true",
help="Run a full build for coverity analisys.",
)
def check_coverity(
command_context,
source=[],
output=None,
coverity_output_path=None,
outgoing=False,
full_build=False,
verbose=False,
):
command_context._set_log_level(verbose)
command_context.activate_virtualenv()
command_context.log_manager.enable_unstructured()
if "MOZ_AUTOMATION" not in os.environ:
command_context.log(
logging.INFO,
"static-analysis",
{},
"Coverity based static-analysis cannot be ran outside automation.",
)
return
if full_build and outgoing:
command_context.log(
logging.INFO,
"static-analysis",
{},
"Coverity full build cannot be associated with outgoing.",
)
return
# Use outgoing files instead of source files
if outgoing:
repo = get_repository_object(command_context.topsrcdir)
files = repo.get_outgoing_files()
source = get_abspath_files(command_context, files)
# Verify that we have source files or we are dealing with a full-build
if len(source) == 0 and not full_build:
command_context.log(
logging.ERROR,
"static-analysis",
{},
"ERROR: There are no files that coverity can use to scan.",
)
return 0
# Load the configuration file for coverity static-analysis
# For the moment we store only the reliability index for each checker
# as the rest is managed on the https://github.com/mozilla/release-services side.
cov_config = _get_cov_config(command_context)
rc, cov = setup_coverity(command_context)
if rc != 0:
return rc
# First run cov-run-desktop --setup in order to setup the analysis env
# We need this in both cases, per patch analysis or full tree build
cmd = [cov.cov_run_desktop, "--setup"]
if run_cov_command(command_context, cmd, cov.cov_path):
# Avoiding a bug in Coverity where the snapshot is not identified
# as being built with the current analysis binary.
if not full_build:
return 1
# Run cov-configure for clang, javascript and python
langs = ["clang", "javascript", "python"]
for lang in langs:
cmd = [cov.cov_configure, "--{}".format(lang)]
if run_cov_command(command_context, cmd):
return 1
if full_build:
# 1. Build the model file that is going to be used for analysis
model_path = mozpath.join("tools", "coverity", "model.cpp")
cmd = [cov.cov_make_library, "-sf", cov.cov_lic_path, model_path]
if run_cov_command(command_context, cmd):
return 1
# 2. Run cov-build
# Add cov_build command
cmd = [cov.cov_build, "--dir", "cov-int"]
# Add fs capture search paths for languages that are not built
cmd += [
"--fs-capture-search={}".format(path) for path in cov.cov_capture_search
]
# Add the exclude criteria for test cases
cmd += [
"--fs-capture-search-exclude-regex",
".*/test",
"./mach",
"--log-no-times",
"build",
]
if run_cov_command(command_context, cmd):
return 1
# 3. Run cov-analyze and exclude disabled checkers
cmd = [
cov.cov_analyze,
"--dir",
"cov-int",
"--all",
"--enable-virtual",
"--strip-path={}".format(command_context.topsrcdir),
"-sf",
cov.cov_lic_path,
]
cmd += [
"--disable={}".format(key)
for key, checker in cov_config["coverity_checkers"].items()
if checker.get("publish", True) is False
]
if run_cov_command(command_context, cmd):
return 1
# 4. Run cov-commit-defects
protocol = "https" if cov.cov_server_ssl else "http"
server_url = "{0}://{1}:{2}".format(protocol, cov.cov_url, cov.cov_port)
cmd = [
cov.cov_commit_defects,
"--auth-key-file",
cov.cov_auth_path,
"--stream",
cov.cov_stream,
"--dir",
"cov-int",
"--url",
server_url,
"-sf",
cov.cov_lic_path,
]
if run_cov_command(command_context, cmd):
return 1
return 0
# TEMP Fix for Case# 00847671
cmd = [
cov.cov_configure,
"--delete-compiler-config",
"template-clangcc-config-0",
"coverity_config.xml",
]
if run_cov_command(command_context, cmd):
return 1
cmd = [
cov.cov_configure,
"--delete-compiler-config",
"template-clangcxx-config-0",
"coverity_config.xml",
]
if run_cov_command(command_context, cmd):
return 1
cmd = [
cov.cov_configure,
"--clang",
"--xml-option",
"append_arg:--ppp_translator",
"--xml-option",
"append_arg:replace/\{([a-zA-Z]+::None\(\))\}/=$1",
]
if run_cov_command(command_context, cmd):
return 1
# End for Case# 00847671
rc, compile_db, compilation_commands_path = _build_compile_db(
command_context, verbose=verbose
)
rc = rc or _build_export(command_context, jobs=2, verbose=verbose)
if rc != 0:
return rc
commands_list = get_files_with_commands(command_context, compile_db, source)
if len(commands_list) == 0:
command_context.log(
logging.INFO,
"static-analysis",
{},
"There are no files that need to be analyzed.",
)
return 0
# For each element in commands_list run `cov-translate`
for element in commands_list:
def transform_cmd(cmd):
# Coverity Analysis has a problem translating definitions passed as:
# '-DSOME_DEF="ValueOfAString"', please see Bug 1588283.
return [re.sub(r'\'-D(.*)="(.*)"\'', r'-D\1="\2"', arg) for arg in cmd]
build_command = element["command"].split(" ")
cmd = [cov.cov_translate, "--dir", cov.cov_idir_path] + transform_cmd(
build_command
)
if run_cov_command(command_context, cmd, element["directory"]):
return 1
if coverity_output_path is None:
cov_result = mozpath.join(cov.cov_state_path, "cov-results.json")
else:
cov_result = mozpath.join(coverity_output_path, "cov-results.json")
# Once the capture is performed we need to do the actual Coverity Desktop analysis
cmd = [
cov.cov_run_desktop,
"--json-output-v6",
cov_result,
"--analyze-captured-source",
]
if run_cov_command(command_context, cmd, cov.cov_state_path):
return 1
if output is not None:
dump_cov_artifact(command_context, cov_config, cov_result, source, output)
def get_abspath_files(command_context, files):
return [mozpath.join(command_context.topsrcdir, f) for f in files]
def run_cov_command(command_context, cmd, path=None):
if path is None:
# We want to run it in topsrcdir
path = command_context.topsrcdir
command_context.log(logging.INFO, "static-analysis", {}, "Running " + " ".join(cmd))
rc = command_context.run_process(
args=cmd, cwd=path, pass_thru=True, ensure_exit_code=False
)
if rc != 0:
command_context.log(
logging.ERROR,
"static-analysis",
{},
"ERROR: Running " + " ".join(cmd) + " failed!",
)
return rc
return 0
def get_reliability_index_for_cov_checker(command_context, cov_config, checker_name):
if cov_config is None:
command_context.log(
logging.INFO,
"static-analysis",
{},
"Coverity config file not found, "
"using default-value 'reliablity' = medium. for checker {}".format(
checker_name
),
)
return "medium"
checkers = cov_config["coverity_checkers"]
if checker_name not in checkers:
command_context.log(
logging.INFO,
"static-analysis",
{},
"Coverity checker {} not found to determine reliability index. "
"For the moment we shall use the default 'reliablity' = medium.".format(
checker_name
),
)
return "medium"
if "reliability" not in checkers[checker_name]:
# This checker doesn't have a reliability index
command_context.log(
logging.INFO,
"static-analysis",
{},
"Coverity checker {} doesn't have a reliability index set, "
"field 'reliability is missing', please cosinder adding it. "
"For the moment we shall use the default 'reliablity' = medium.".format(
checker_name
),
)
return "medium"
return checkers[checker_name]["reliability"]
def dump_cov_artifact(command_context, cov_config, cov_results, source, output):
# Parse Coverity json into structured issues
with open(cov_results) as f:
result = json.load(f)
# Parse the issues to a standard json format
issues_dict = {"files": {}}
files_list = issues_dict["files"]
def build_element(issue):
# We look only for main event
event_path = next(
(event for event in issue["events"] if event["main"] is True), None
)
dict_issue = {
"line": issue["mainEventLineNumber"],
"flag": issue["checkerName"],
"message": event_path["eventDescription"],
"reliability": get_reliability_index_for_cov_checker(
command_context, cov_config, issue["checkerName"]
),
"extra": {
"category": issue["checkerProperties"]["category"],
"stateOnServer": issue["stateOnServer"],
"stack": [],
},
}
# Embed all events into extra message
for event in issue["events"]:
dict_issue["extra"]["stack"].append(
{
"file_path": build_repo_relative_path(
event["strippedFilePathname"], command_context.topsrcdir
),
"line_number": event["lineNumber"],
"path_type": event["eventTag"],
"description": event["eventDescription"],
}
)
return dict_issue
for issue in result["issues"]:
path = build_repo_relative_path(
issue["strippedMainEventFilePathname"], command_context.topsrcdir
)
# Skip clang diagnostic messages
if issue["checkerName"].startswith("RW.CLANG"):
continue
if path is None:
# Since we skip a result we should log it
command_context.log(
logging.INFO,
"static-analysis",
{},
"Skipping CID: {0} from file: {1} since it's not related "
"with the current patch.".format(
issue["stateOnServer"]["cid"],
issue["strippedMainEventFilePathname"],
),
)
continue
if path in files_list:
files_list[path]["warnings"].append(build_element(issue))
else:
files_list[path] = {"warnings": [build_element(issue)]}
with open(output, "w") as f:
json.dump(issues_dict, f)
def get_coverity_secrets(command_context):
from gecko_taskgraph.util.taskcluster import get_root_url
secret_name = "project/relman/coverity"
secrets_url = "{}/secrets/v1/secret/{}".format(get_root_url(True), secret_name)
command_context.log(
logging.INFO,
"static-analysis",
{},
'Using symbol upload token from the secrets service: "{}"'.format(secrets_url),
)
import requests
res = requests.get(secrets_url)
res.raise_for_status()
secret = res.json()
cov_config = secret["secret"] if "secret" in secret else None
cov = SimpleNamespace()
if cov_config is None:
command_context.log(
logging.ERROR,
"static-analysis",
{},
"ERROR: Ill formatted secret for Coverity. Aborting analysis.",
)
return 1, cov
cov.cov_analysis_url = cov_config.get("package_url")
cov.cov_package_name = cov_config.get("package_name")
cov.cov_url = cov_config.get("server_url")
cov.cov_server_ssl = cov_config.get("server_ssl", True)
# In case we don't have a port in the secret we use the default one,
# for a default coverity deployment.
cov.cov_port = cov_config.get("server_port", 8443)
cov.cov_auth = cov_config.get("auth_key")
cov.cov_package_ver = cov_config.get("package_ver")
cov.cov_lic_name = cov_config.get("lic_name")
cov.cov_capture_search = cov_config.get("fs_capture_search", None)
cov.cov_full_stack = cov_config.get("full_stack", False)
cov.cov_stream = cov_config.get("stream", False)
return 0, cov
def download_coverity(command_context, cov):
if (
cov.cov_url is None
or cov.cov_port is None
or cov.cov_analysis_url is None
or cov.cov_auth is None
):
command_context.log(
logging.ERROR,
"static-analysis",
{},
"ERROR: Missing Coverity secret on try job!",
)
return 1
COVERITY_CONFIG = """
{
"type": "Coverity configuration",
"format_version": 1,
"settings": {
"server": {
"host": "%s",
"ssl" : true,
"port": %s,
"on_new_cert" : "trust",
"auth_key_file": "%s"
},
"stream": "Firefox",
"cov_run_desktop": {
"build_cmd": [],
"clean_cmd": []
}
}
}
"""
# Generate the coverity.conf and auth files
cov.cov_auth_path = mozpath.join(cov.cov_state_path, "auth")
cov_setup_path = mozpath.join(cov.cov_state_path, "coverity.conf")
cov_conf = COVERITY_CONFIG % (cov.cov_url, cov.cov_port, cov.cov_auth_path)
def download(artifact_url, target):
import requests
command_context.log_manager.enable_unstructured()
resp = requests.get(artifact_url, verify=False, stream=True)
command_context.log_manager.disable_unstructured()
resp.raise_for_status()
# Extract archive into destination
with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
tar.extractall(target)
download(cov.cov_analysis_url, cov.cov_state_path)
with open(cov.cov_auth_path, "w") as f:
f.write(cov.cov_auth)
# Modify its permissions to 600
os.chmod(cov.cov_auth_path, 0o600)
with open(cov_setup_path, "a") as f:
f.write(cov_conf)
def setup_coverity(command_context, force_download=True):
rc, config, _ = _get_config_environment(command_context)
if rc != 0:
return rc, None
rc, cov = get_coverity_secrets(command_context)
if rc != 0:
return rc, cov
# Create a directory in mozbuild where we setup coverity
cov.cov_state_path = mozpath.join(
command_context._mach_context.state_dir, "coverity"
)
if force_download is True and os.path.exists(cov.cov_state_path):
shutil.rmtree(cov.cov_state_path)
os.mkdir(cov.cov_state_path)
# Download everything that we need for Coverity from our private instance
download_coverity(command_context, cov)
cov.cov_path = mozpath.join(cov.cov_state_path, cov.cov_package_name)
cov.cov_run_desktop = mozpath.join(cov.cov_path, "bin", "cov-run-desktop")
cov.cov_configure = mozpath.join(cov.cov_path, "bin", "cov-configure")
cov.cov_make_library = mozpath.join(cov.cov_path, "bin", "cov-make-library")
cov.cov_build = mozpath.join(cov.cov_path, "bin", "cov-build")
cov.cov_analyze = mozpath.join(cov.cov_path, "bin", "cov-analyze")
cov.cov_commit_defects = mozpath.join(cov.cov_path, "bin", "cov-commit-defects")
cov.cov_translate = mozpath.join(cov.cov_path, "bin", "cov-translate")
cov.cov_configure = mozpath.join(cov.cov_path, "bin", "cov-configure")
cov.cov_work_path = mozpath.join(cov.cov_state_path, "data-coverity")
cov.cov_idir_path = mozpath.join(cov.cov_work_path, cov.cov_package_ver, "idir")
cov.cov_lic_path = mozpath.join(
cov.cov_work_path, cov.cov_package_ver, "lic", cov.cov_lic_name
)
if not os.path.exists(cov.cov_path):
command_context.log(
logging.ERROR,
"static-analysis",
{},
"ERROR: Missing Coverity in {}".format(cov.cov_path),
)
return 1, cov
return 0, cov
def get_files_with_commands(command_context, compile_db, source):
"""
Returns an array of dictionaries having file_path with build command
@@ -1031,24 +446,6 @@ def get_clang_tidy_config(command_context):
return ClangTidyConfig(command_context.topsrcdir)
def _get_cov_config(command_context):
try:
file_handler = open(
mozpath.join(command_context.topsrcdir, "tools", "coverity", "config.yaml")
)
config = yaml.safe_load(file_handler)
except Exception:
command_context.log(
logging.ERROR,
"static-analysis",
{},
"ERROR: Looks like config.yaml is not valid, we are going to use default"
" values for the rest of the analysis for coverity.",
)
return None
return config
def _get_required_version(command_context):
version = get_clang_tidy_config(command_context).version
if version is None:

View file

@@ -1,95 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
---
job-defaults:
# Run only on try and code-review tasks
# to avoid running Coverity SA on the whole codebase
run-on-projects: []
platform: linux64/debug
worker-type: t-linux-xlarge-source
worker:
docker-image: {in-tree: debian11-amd64-build}
max-run-time: 5400
treeherder:
kind: other
tier: 2
run:
using: run-task
tooltool-downloads: public
fetches:
toolchain:
- linux64-clang-10
- linux64-rust
- linux64-cbindgen
- linux64-nasm
- linux64-node
- sysroot-x86_64-linux-gnu
- sysroot-wasm32-wasi
when:
# Extension list from https://hg.mozilla.org/mozilla-central/file/default/python/mozbuild/mozbuild/mach_commands.py#l1664
files-changed:
- '**/*.c'
- '**/*.cpp'
- '**/*.cc'
- '**/*.cxx'
- '**/*.m'
- '**/*.mm'
- '**/*.h'
- '**/*.hh'
- '**/*.hpp'
- '**/*.hxx'
coverity:
description: Run static-analysis (Coverity) on C/C++ patches
attributes:
code-review: true
treeherder:
symbol: cpp(coverity)
run:
cwd: '{checkout}'
command: >-
source taskcluster/scripts/misc/source-test-clang-setup.sh &&
./mach --log-no-times static-analysis check-coverity --outgoing --output $HOME/coverity.json
scopes:
- secrets:get:project/relman/coverity
worker:
artifacts:
- type: file
name: public/code-review/coverity.json
path: /builds/worker/coverity.json
- type: file
name: public/code-review/coverity-orig.json
path: /builds/worker/workspace/coverity/cov-results.json
coverity-full-analysis:
description: Run Coverity based static-analysis on the entire Gecko repo
treeherder:
symbol: Static-Analysis(coverity-full-analysis)
worker-type:
by-platform:
linux64.*: b-linux-xlarge
run-on-projects: []
run:
cwd: '{checkout}'
command: >-
source taskcluster/scripts/misc/source-test-clang-setup.sh &&
./mach --log-no-times static-analysis check-coverity --full-build
scopes:
- secrets:get:project/relman/coverity
worker:
max-run-time: 14400
when:
files-changed:
- '**/*.c'
- '**/*.cpp'
- '**/*.cc'
- '**/*.cxx'
- '**/*.m'
- '**/*.mm'
- '**/*.h'
- '**/*.hh'
- '**/*.hpp'
- '**/*.hxx'
- '**/*.py'
- '**/*.js'

View file

@@ -19,7 +19,6 @@ kind-dependencies:
jobs-from:
- clang.yml
- codeql.yml
- coverity.yml
- cram.yml
- doc.yml
- file-metadata.yml

View file

@@ -976,13 +976,6 @@ def target_tasks_searchfox(full_task_graph, parameters, graph_config):
]
# Run Coverity Static Analysis once daily.
@_target_task("coverity_static_analysis_full")
def target_tasks_coverity_full(full_task_graph, parameters, graph_config):
"""Select tasks required to run Coverity Static Analysis"""
return ["source-test-coverity-coverity-full-analysis"]
# Run build linux64-plain-clang-trunk/opt on mozilla-central/beta with perf tests
@_target_task("linux64_clang_trunk_perf")
def target_tasks_build_linux64_clang_trunk_perf(

View file

@@ -1,508 +0,0 @@
---
# It is used by 'mach static-analysis check-coverity' and
# 'phabricator static-analysis bot', in automation, in order to determine
# how prone to false positives a checker is.
#
# In order to update this file please do the following:
# 1. Obtain the coverity-analysis package.
# 2. Run cov-analyze: `./cov-analyze --list-checkers`.
# 3. Add the new checker(s) from step 2 to the list.
# 4. Depending on the reliability of the checker, please set the `reliability` field;
# otherwise `medium` will be used as the reliability index.
coverity_checkers:
COPY_PASTE_ERROR:
reliability: low
DEADCODE:
reliability: low
FORWARD_NULL:
reliability: high
IDENTICAL_BRANCHES:
reliability: high
CONSTANT_EXPRESSION_RESULT:
reliability: high
UNREACHABLE:
reliability: low
REVERSE_INULL:
reliability: high
UNEXPECTED_CONTROL_FLOW:
reliability: medium
NESTING_INDENT_MISMATCH:
reliability: high
STRAY_SEMICOLON:
publish: false
reliability: medium
RESOURCE_LEAK:
reliability: medium
NULL_RETURNS:
reliability: medium
DIVIDE_BY_ZERO:
reliability: medium
OVERFLOW_BEFORE_WIDEN:
reliability: high
UNINTENDED_INTEGER_DIVISION:
reliability: medium
SWAPPED_ARGUMENTS:
reliability: low
NO_EFFECT:
reliability: medium
BAD_SHIFT:
reliability: low
INFINITE_LOOP:
reliability: medium
MISSING_RESTORE:
reliability: low
UNUSED_VALUE:
reliability: medium
USELESS_CALL:
reliability: low
MISSING_BREAK:
reliability: low
CHECKED_RETURN:
reliability: low
PROPERTY_MIXUP:
reliability: medium
CALL_SUPER:
reliability: medium
IDENTIFIER_TYPO:
reliability: medium
USE_AFTER_FREE:
reliability: low
ALLOC_FREE_MISMATCH:
reliability: medium
ARRAY_VS_SINGLETON:
reliability: low
ASSERT_SIDE_EFFECT:
reliability: medium
BAD_ALLOC_ARITHMETIC:
reliability: medium
BAD_ALLOC_STRLEN:
reliability: medium
BAD_COMPARE:
reliability: medium
BAD_FREE:
reliability: medium
BAD_SIZEOF:
reliability: medium
CHAR_IO:
reliability: low
EVALUATION_ORDER:
reliability: medium
INCOMPATIBLE_CAST:
reliability: medium
MISSING_COMMA:
reliability: high
MISSING_RETURN:
reliability: medium
NEGATIVE_RETURNS:
reliability: low
OVERRUN:
reliability: low
PASS_BY_VALUE:
reliability: high
PRINTF_ARGS:
reliability: medium
READLINK:
reliability: medium
RETURN_LOCAL:
reliability: low
REVERSE_NEGATIVE:
reliability: medium
SIGN_EXTENSION:
reliability: low
SIZEOF_MISMATCH:
reliability: low
UNINIT:
reliability: high
VARARGS:
reliability: medium
INVALIDATE_ITERATOR:
reliability: medium
BAD_LOCK_OBJECT:
reliability: medium
GUARDED_BY_VIOLATION:
reliability: medium
LOCK_EVASION:
reliability: medium
MISSING_THROW:
reliability: medium
NON_STATIC_GUARDING_STATIC:
reliability: medium
VOLATILE_ATOMICITY:
reliability: medium
OVERLAPPING_COPY:
reliability: medium
BAD_OVERRIDE:
reliability: medium
CTOR_DTOR_LEAK:
reliability: low
DELETE_ARRAY:
reliability: low
DELETE_VOID:
reliability: medium
MISMATCHED_ITERATOR:
reliability: medium
MISSING_MOVE_ASSIGNMENT:
reliability: low
STREAM_FORMAT_STATE:
reliability: medium
UNCAUGHT_EXCEPT:
reliability: medium
UNINIT_CTOR:
reliability: high
VIRTUAL_DTOR:
reliability: medium
WRAPPER_ESCAPE:
reliability: low
BAD_EQ:
reliability: medium
BAD_EQ_TYPES:
reliability: medium
LOCK_INVERSION:
reliability: medium
BAD_CHECK_OF_WAIT_COND:
reliability: medium
DC.DANGEROUS:
reliability: medium
DC.DEADLOCK:
reliability: medium
HIBERNATE_BAD_HASHCODE:
reliability: medium
ORM_LOAD_NULL_CHECK:
reliability: medium
ORM_UNNECESSARY_GET:
reliability: medium
REGEX_CONFUSION:
reliability: medium
SERVLET_ATOMICITY:
reliability: medium
SINGLETON_RACE:
reliability: medium
WRONG_METHOD:
reliability: medium
PATH_MANIPULATION:
reliability: medium
SQLI:
reliability: medium
HARDCODED_CREDENTIALS:
reliability: medium
SENSITIVE_DATA_LEAK:
reliability: medium
SCRIPT_CODE_INJECTION:
reliability: medium
REGEX_INJECTION:
reliability: medium
BAD_CERT_VERIFICATION:
reliability: medium
COM.BAD_FREE:
reliability: medium
COM.BSTR.CONV:
reliability: medium
EXPLICIT_THIS_EXPECTED:
reliability: medium
UNINTENDED_GLOBAL:
reliability: medium
OS_CMD_INJECTION:
reliability: medium
XSS:
reliability: medium
WEAK_PASSWORD_HASH:
reliability: medium
UNSAFE_DESERIALIZATION:
reliability: medium
OPEN_REDIRECT:
reliability: medium
CSRF:
reliability: medium
UNSAFE_REFLECTION:
reliability: medium
BLACKLIST_FOR_AUTHN:
reliability: medium
DYNAMIC_OBJECT_ATTRIBUTES:
reliability: medium
RAILS_DEFAULT_ROUTES:
reliability: medium
RAILS_DEVISE_CONFIG:
reliability: medium
RAILS_MISSING_FILTER_ACTION:
reliability: medium
REGEX_MISSING_ANCHOR:
reliability: medium
RUBY_VULNERABLE_LIBRARY:
reliability: medium
SESSION_MANIPULATION:
reliability: medium
UNSAFE_BASIC_AUTH:
reliability: medium
UNSAFE_SESSION_SETTING:
reliability: medium
XPATH_INJECTION:
reliability: medium
RISKY_CRYPTO:
reliability: medium
UNENCRYPTED_SENSITIVE_DATA:
reliability: medium
XML_EXTERNAL_ENTITY:
reliability: medium
CONFIG.ATS_INSECURE:
reliability: medium
CUSTOM_KEYBOARD_DATA_LEAK:
reliability: medium
INSECURE_COMMUNICATION:
reliability: medium
INSECURE_MULTIPEER_CONNECTION:
reliability: medium
WEAK_BIOMETRIC_AUTH:
reliability: medium
BUFFER_SIZE:
reliability: high
CHROOT:
reliability: medium
DC.PREDICTABLE_KEY_PASSWORD:
reliability: medium
publish: !!bool no
DC.STREAM_BUFFER:
reliability: medium
publish: !!bool no
DC.WEAK_CRYPTO:
reliability: low
publish: !!bool no
OPEN_ARGS:
reliability: medium
STRING_NULL:
reliability: medium
STRING_OVERFLOW:
reliability: low
STRING_SIZE:
reliability: medium
TAINTED_SCALAR:
reliability: low
TAINTED_STRING:
reliability: medium
TOCTOU:
reliability: low
SECURE_TEMP:
reliability: medium
UNSAFE_XML_PARSE_CONFIG:
reliability: medium
ATOMICITY:
reliability: medium
LOCK:
reliability: medium
MISSING_LOCK:
reliability: medium
ORDER_REVERSAL:
reliability: medium
SLEEP:
reliability: medium
ASSIGN_NOT_RETURNING_STAR_THIS:
reliability: medium
COPY_WITHOUT_ASSIGN:
reliability: medium
MISSING_COPY_OR_ASSIGN:
reliability: medium
SELF_ASSIGN:
reliability: medium
WEAK_GUARD:
reliability: medium
AUDIT.SPECULATIVE_EXECUTION_DATA_LEAK:
reliability: medium
DC.STRING_BUFFER:
reliability: medium
publish: !!bool no
ENUM_AS_BOOLEAN:
reliability: medium
INTEGER_OVERFLOW:
reliability: low
MISRA_CAST:
reliability: medium
MIXED_ENUMS:
reliability: low
STACK_USE:
reliability: medium
USER_POINTER:
reliability: medium
PARSE_ERROR:
reliability: low
FLOATING_POINT_EQUALITY:
reliability: medium
ORM_LOST_UPDATE:
reliability: medium
HFA:
reliability: medium
COM.ADDROF_LEAK:
reliability: medium
COM.BSTR.ALLOC:
reliability: medium
COM.BSTR.BAD_COMPARE:
reliability: medium
COM.BSTR.NE_NON_BSTR:
reliability: medium
VCALL_IN_CTOR_DTOR:
reliability: medium
INSECURE_DIRECT_OBJECT_REFERENCE:
reliability: medium
UNESCAPED_HTML:
reliability: medium
SECURE_CODING:
reliability: medium
publish: !!bool no
SIZECHECK:
reliability: medium
MISSING_AUTHZ:
reliability: medium
NOSQL_QUERY_INJECTION:
reliability: medium
HEADER_INJECTION:
reliability: medium
INSECURE_RANDOM:
reliability: medium
CONFIG.DYNAMIC_DATA_HTML_COMMENT:
reliability: medium
LDAP_INJECTION:
reliability: medium
UNLOGGED_SECURITY_EXCEPTION:
reliability: medium
UNRESTRICTED_DISPATCH:
reliability: medium
UNSAFE_NAMED_QUERY:
reliability: medium
TAINT_ASSERT:
reliability: medium
UNKNOWN_LANGUAGE_INJECTION:
reliability: medium
URL_MANIPULATION:
reliability: medium
TAINTED_ENVIRONMENT_WITH_EXECUTION:
reliability: medium
ASPNET_MVC_VERSION_HEADER:
reliability: medium
CONFIG.ASPNET_VERSION_HEADER:
reliability: medium
CONFIG.ASP_VIEWSTATE_MAC:
reliability: medium
CONFIG.CONNECTION_STRING_PASSWORD:
reliability: medium
CONFIG.COOKIES_MISSING_HTTPONLY:
reliability: medium
CONFIG.DEAD_AUTHORIZATION_RULE:
reliability: medium
CONFIG.ENABLED_DEBUG_MODE:
reliability: medium
CONFIG.ENABLED_TRACE_MODE:
reliability: medium
CONFIG.MISSING_CUSTOM_ERROR_PAGE:
reliability: medium
PREDICTABLE_RANDOM_SEED:
reliability: medium
ATTRIBUTE_NAME_CONFLICT:
reliability: medium
CONFIG.DUPLICATE_SERVLET_DEFINITION:
reliability: medium
CONFIG.DWR_DEBUG_MODE:
reliability: medium
CONFIG.HTTP_VERB_TAMPERING:
reliability: medium
CONFIG.JAVAEE_MISSING_HTTPONLY:
reliability: medium
CONFIG.MISSING_GLOBAL_EXCEPTION_HANDLER:
reliability: medium
CONFIG.MISSING_JSF2_SECURITY_CONSTRAINT:
reliability: medium
CONFIG.SPRING_SECURITY_DEBUG_MODE:
reliability: medium
CONFIG.SPRING_SECURITY_DISABLE_AUTH_TAGS:
reliability: medium
CONFIG.SPRING_SECURITY_HARDCODED_CREDENTIALS:
reliability: medium
CONFIG.SPRING_SECURITY_REMEMBER_ME_HARDCODED_KEY:
reliability: medium
CONFIG.SPRING_SECURITY_SESSION_FIXATION:
reliability: medium
CONFIG.STRUTS2_CONFIG_BROWSER_PLUGIN:
reliability: medium
CONFIG.STRUTS2_DYNAMIC_METHOD_INVOCATION:
reliability: medium
CONFIG.STRUTS2_ENABLED_DEV_MODE:
reliability: medium
CONFIG.UNSAFE_SESSION_TIMEOUT:
reliability: medium
EL_INJECTION:
reliability: medium
JAVA_CODE_INJECTION:
reliability: medium
JCR_INJECTION:
reliability: medium
JSP_DYNAMIC_INCLUDE:
reliability: medium
JSP_SQL_INJECTION:
reliability: medium
OGNL_INJECTION:
reliability: medium
SESSION_FIXATION:
reliability: medium
TRUST_BOUNDARY_VIOLATION:
reliability: medium
UNSAFE_JNI:
reliability: medium
CONFIG.HANA_XS_PREVENT_XSRF_DISABLED:
reliability: medium
CONFIG.SEQUELIZE_ENABLED_LOGGING:
reliability: medium
COOKIE_INJECTION:
reliability: medium
CSS_INJECTION:
reliability: medium
DOM_XSS:
reliability: medium
INSECURE_SALT:
reliability: medium
INSUFFICIENT_LOGGING:
reliability: medium
LOCALSTORAGE_MANIPULATION:
reliability: medium
MISSING_IFRAME_SANDBOX:
reliability: medium
SESSIONSTORAGE_MANIPULATION:
reliability: medium
TEMPLATE_INJECTION:
reliability: medium
UNCHECKED_ORIGIN:
reliability: medium
UNRESTRICTED_MESSAGE_TARGET:
reliability: medium
ANGULAR_EXPRESSION_INJECTION:
reliability: medium
CONFIG.SYMFONY_CSRF_PROTECTION_DISABLED:
reliability: medium
SYMFONY_EL_INJECTION:
reliability: medium
LOG_INJECTION:
reliability: medium
SQL_NOT_CONSTANT:
reliability: medium
XML_INJECTION:
reliability: medium
INSECURE_COOKIE:
reliability: medium
ANGULAR_BYPASS_SECURITY:
reliability: medium
ANGULAR_ELEMENT_REFERENCE:
reliability: medium
LOCALSTORAGE_WRITE:
reliability: medium
ANDROID_CAPABILITY_LEAK:
reliability: medium
ANDROID_DEBUG_MODE:
reliability: medium
EXPOSED_PREFERENCES:
reliability: medium
IMPLICIT_INTENT:
reliability: medium
MISSING_PERMISSION_FOR_BROADCAST:
reliability: medium
MISSING_PERMISSION_ON_EXPORTED_COMPONENT:
reliability: medium
MOBILE_ID_MISUSE:
reliability: medium
UNRESTRICTED_ACCESS_TO_FILE:
reliability: medium

View file

@@ -1,29 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
Coverity model file in order to avoid false positives
*/
// In Bug 1248897 we've seen that Coverity thinks that json-cpp allocates
// memory for the strings that are used as indexes; this is wrong, and this
// model of CZString fixes it.
namespace Json {
class Value {
private:
class CZString {
private:
char const* cstr_;
public:
~CZString() {
// Don't do anything since most of the time cstr_ only stores address of
// str
__coverity_escape__(static_cast<void*>(cstr_));
}
};
};
} // namespace Json

View file

@@ -13,9 +13,6 @@ with Files("code-coverage/**"):
with Files("compare-locales/mach_commands.py"):
BUG_COMPONENT = ("Localization Infrastructure and Tools", "compare-locales")
with Files("coverity/**"):
BUG_COMPONENT = ("Firefox Build System", "Source Code Analysis")
with Files("github-sync/**"): with Files("github-sync/**"):
BUG_COMPONENT = ("Core", "Graphics") BUG_COMPONENT = ("Core", "Graphics")