vendor Catch2 and ETL

2024-10-29 10:49:46 +01:00
parent 3915e0d641
commit 5173292491
1763 changed files with 959387 additions and 71 deletions

View File

@@ -0,0 +1,243 @@
#!/usr/bin/env python3
import io
import os
import sys
import subprocess
import re
import difflib
import shutil
import scriptCommon
from scriptCommon import catchPath
if os.name == 'nt':
# Enable console colours on windows
os.system('')
rootPath = os.path.join(catchPath, 'tests/SelfTest/Baselines')
# Init so it is guaranteed to fail loudly if the scoping gets messed up
outputDirPath = None
if len(sys.argv) == 3:
cmdPath = sys.argv[1]
outputDirBasePath = sys.argv[2]
outputDirPath = os.path.join(outputDirBasePath, 'ApprovalTests')
if not os.path.isdir(outputDirPath):
os.mkdir(outputDirPath)
else:
print('Usage: {} path-to-SelfTest-executable path-to-temp-output-dir'.format(sys.argv[0]))
exit(1)
def get_rawResultsPath(baseName):
return os.path.join(outputDirPath, '_{0}.tmp'.format(baseName))
def get_baselinesPath(baseName):
return os.path.join(rootPath, '{0}.approved.txt'.format(baseName))
def _get_unapprovedPath(path, baseName):
return os.path.join(path, '{0}.unapproved.txt'.format(baseName))
def get_filteredResultsPath(baseName):
return _get_unapprovedPath(outputDirPath, baseName)
def get_unapprovedResultsPath(baseName):
return _get_unapprovedPath(rootPath, baseName)
langFilenameParser = re.compile(r'(.+\.[ch]pp)')
filelocParser = re.compile(r'''
(?P<path_prefix>tests/SelfTest/(?:\w+/)*) # We separate prefix and fname, so that
(?P<filename>\w+\.tests\.[ch]pp) # we can keep only filename
(?::|\() # Linux has : as separator between fname and line number, Windows uses (
(\d*) # line number
\)? # Windows also uses an ending separator, )
''', re.VERBOSE)
lineNumberParser = re.compile(r' line="[0-9]*"')
hexParser = re.compile(r'\b(0[xX][0-9a-fA-F]+)\b')
# Note: junit must serialize time with 3 (or fewer) decimal places
# before generalizing this parser, make sure that this is checked
# in other places too.
junitDurationsParser = re.compile(r' time="[0-9]+\.[0-9]{3}"')
durationParser = re.compile(r''' duration=['"][0-9]+['"]''')
timestampsParser = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}\:\d{2}\:\d{2}Z')
versionParser = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+(-\w*\.[0-9]+)?')
nullParser = re.compile(r'\b(__null|nullptr)\b')
exeNameParser = re.compile(r'''
\b
SelfTest # Expected executable name
(?:.exe)? # Executable name contains .exe on Windows.
\b
''', re.VERBOSE)
# This is a hack until something more reasonable is figured out
specialCaseParser = re.compile(r'file\((\d+)\)')
sinceEpochParser = re.compile(r'\d+ .+ since epoch')
# The weird OR is there to always have at least empty string for group 1
tapTestNumParser = re.compile(r'^((?:not ok)|(?:ok)|(?:warning)|(?:info)) (\d+) -')
overallResult = 0
def diffFiles(fileA, fileB):
with io.open(fileA, 'r', encoding='utf-8', errors='surrogateescape') as file:
aLines = [line.rstrip() for line in file.readlines()]
with io.open(fileB, 'r', encoding='utf-8', errors='surrogateescape') as file:
bLines = [line.rstrip() for line in file.readlines()]
shortenedFilenameA = fileA.rsplit(os.sep, 1)[-1]
shortenedFilenameB = fileB.rsplit(os.sep, 1)[-1]
diff = difflib.unified_diff(aLines, bLines, fromfile=shortenedFilenameA, tofile=shortenedFilenameB, n=0)
return [line for line in diff if line[0] in ('+', '-')]
def normalizeFilepath(line):
# Sometimes the path separators used by compiler and Python can differ,
# so we try to match the path with both forward and backward path
# separators, to make the paths relative to Catch2 repo root.
forwardSlashPath = catchPath.replace('\\', '/')
if forwardSlashPath in line:
line = line.replace(forwardSlashPath + '/', '')
backwardSlashPath = catchPath.replace('/', '\\')
if backwardSlashPath in line:
line = line.replace(backwardSlashPath + '\\', '')
m = langFilenameParser.match(line)
if m:
filepath = m.group(0)
# go from \ in windows paths to /
filepath = filepath.replace('\\', '/')
# remove start of relative path
filepath = filepath.replace('../', '')
line = line[:m.start()] + filepath + line[m.end():]
return line
def filterLine(line, isCompact):
line = normalizeFilepath(line)
# strip source line numbers
# Note that this parser assumes an already normalized filepath from above,
# and might break terribly if it is moved around before the normalization.
    line = filelocParser.sub(r'\g<filename>:<line number>', line)
line = lineNumberParser.sub(" ", line)
if isCompact:
line = line.replace(': FAILED', ': failed')
line = line.replace(': PASSED', ': passed')
# strip out the test order number in TAP to avoid massive diffs for every change
    line = tapTestNumParser.sub(r"\g<1> {test-number} -", line)
# strip Catch2 version number
line = versionParser.sub("<version>", line)
# replace *null* with 0
line = nullParser.sub("0", line)
# strip executable name
line = exeNameParser.sub("<exe-name>", line)
# strip hexadecimal numbers (presumably pointer values)
line = hexParser.sub("0x<hex digits>", line)
# strip durations and timestamps
line = junitDurationsParser.sub(' time="{duration}"', line)
line = durationParser.sub(' duration="{duration}"', line)
line = timestampsParser.sub('{iso8601-timestamp}', line)
    line = specialCaseParser.sub(r'file:\g<1>', line)
line = sinceEpochParser.sub('{since-epoch-report}', line)
return line
def run_test(baseName, args):
args[0:0] = [cmdPath]
if not os.path.exists(cmdPath):
raise Exception("Executable doesn't exist at " + cmdPath)
print(args)
rawResultsPath = get_rawResultsPath(baseName)
f = open(rawResultsPath, 'w')
subprocess.call(args, stdout=f, stderr=f)
f.close()
def check_outputs(baseName):
global overallResult
rawResultsPath = get_rawResultsPath(baseName)
baselinesPath = get_baselinesPath(baseName)
filteredResultsPath = get_filteredResultsPath(baseName)
rawFile = io.open(rawResultsPath, 'r', encoding='utf-8', errors='surrogateescape')
filteredFile = io.open(filteredResultsPath, 'w', encoding='utf-8', errors='surrogateescape')
for line in rawFile:
filteredFile.write(filterLine(line, 'compact' in baseName).rstrip() + "\n")
filteredFile.close()
rawFile.close()
os.remove(rawResultsPath)
print()
print(baseName + ":")
if not os.path.exists(baselinesPath):
print( 'first approval')
overallResult += 1
return
diffResult = diffFiles(baselinesPath, filteredResultsPath)
if diffResult:
print('\n'.join(diffResult))
print(" \n****************************\n \033[91mResults differed\033[0m")
overallResult += 1
shutil.move(filteredResultsPath, get_unapprovedResultsPath(baseName))
else:
os.remove(filteredResultsPath)
print(" \033[92mResults matched\033[0m")
def approve(baseName, args):
run_test(baseName, args)
check_outputs(baseName)
print("Running approvals against executable:")
print(" " + cmdPath)
base_args = ["--order", "lex", "--rng-seed", "1", "--colour-mode", "none"]
## special cases first:
# Standard console reporter
approve("console.std", ["~[!nonportable]~[!benchmark]~[approvals] *"] + base_args)
# console reporter, include passes, warn about No Assertions, limit failures to first 4
approve("console.swa4", ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions", "-x", "4"] + base_args)
## Common reporter checks: include passes, warn about No Assertions
reporters = ('console', 'junit', 'xml', 'compact', 'sonarqube', 'tap', 'teamcity', 'automake')
for reporter in reporters:
filename = '{}.sw'.format(reporter)
common_args = ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions"] + base_args
reporter_args = ['-r', reporter]
approve(filename, common_args + reporter_args)
## All reporters at the same time
common_args = ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions"] + base_args
filenames = ['{}.sw.multi'.format(reporter) for reporter in reporters]
reporter_args = []
for reporter, filename in zip(reporters, filenames):
reporter_args += ['-r', '{}::out={}'.format(reporter, get_rawResultsPath(filename))]
run_test("default.sw.multi", common_args + reporter_args)
check_outputs("default.sw.multi")
for reporter, filename in zip(reporters, filenames):
check_outputs(filename)
if overallResult != 0:
print("If these differences are expected, run approve.py to approve new baselines.")
exit(2)
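For reference, a minimal standalone sketch of what the normalization pass above does to a single reporter line; the sample input is made up:

import re
lineNumberParser = re.compile(r' line="[0-9]*"')
hexParser = re.compile(r'\b(0[xX][0-9a-fA-F]+)\b')
sample = '<TestCase name="Xml" line="42"> ptr: 0x7f3a'
sample = lineNumberParser.sub(' ', sample)        # drop volatile source line numbers
sample = hexParser.sub('0x<hex digits>', sample)  # stabilize pointer values
print(sample)  # <TestCase name="Xml" > ptr: 0x<hex digits>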

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python3
import os
import sys
import shutil
import glob
from scriptCommon import catchPath
rootPath = os.path.join( catchPath, 'tests/SelfTest/Baselines' )
if len(sys.argv) > 1:
files = [os.path.join( rootPath, f ) for f in sys.argv[1:]]
else:
files = glob.glob( os.path.join( rootPath, "*.unapproved.txt" ) )
def approveFile( approvedFile, unapprovedFile ):
justFilename = unapprovedFile[len(rootPath)+1:]
if os.path.exists( unapprovedFile ):
if os.path.exists( approvedFile ):
os.remove( approvedFile )
os.rename( unapprovedFile, approvedFile )
print( "approved " + justFilename )
else:
print( "approval file " + justFilename + " does not exist" )
if files:
for unapprovedFile in files:
approveFile( unapprovedFile.replace( "unapproved.txt", "approved.txt" ), unapprovedFile )
else:
print( "no files to approve" )

View File

@@ -0,0 +1,16 @@
rem Start at the root of the Catch project directory, for example:
rem cd Catch2
rem begin-snippet: catch2-build-and-test-win
rem 1. Regenerate the amalgamated distribution
python tools\scripts\generateAmalgamatedFiles.py
rem 2. Configure the full test build
cmake -B debug-build -S . -DCMAKE_BUILD_TYPE=Debug --preset all-tests
rem 3. Run the actual build
cmake --build debug-build
rem 4. Run the tests using CTest
ctest -j 4 --output-on-failure -C Debug --test-dir debug-build
rem end-snippet

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env sh
# Start at the root of the Catch project directory, for example:
# cd Catch2
# begin-snippet: catch2-build-and-test
# 1. Regenerate the amalgamated distribution
./tools/scripts/generateAmalgamatedFiles.py
# 2. Configure the full test build
cmake -B debug-build -S . -DCMAKE_BUILD_TYPE=Debug --preset all-tests
# 3. Run the actual build
cmake --build debug-build
# 4. Run the tests using CTest
ctest -j 4 --output-on-failure -C Debug --test-dir debug-build
# end-snippet

View File

@@ -0,0 +1,151 @@
#!/usr/bin/env python3
"""
Checks that all of the "catch_foo_all.hpp" headers include all subheaders.
The logic is simple: given a folder, e.g. `catch2/matchers`, then the
corresponding header is called `catch_matchers_all.hpp` and contains
* all headers in `catch2/matchers`,
* all headers in `catch2/matchers/{internal, detail}`,
* all convenience catch_matchers_*_all.hpp headers from any non-internal subfolders
The top level header is called `catch_all.hpp`.
"""
internal_dirs = ['detail', 'internal']
from scriptCommon import catchPath
from glob import glob
from pprint import pprint
import os
import re
def normalized_path(path):
    r"""Replaces \ in paths on Windows with /"""
    return path.replace('\\', '/')
def normalized_paths(paths):
    r"""Replaces \ with / in every path"""
    return [normalized_path(path) for path in paths]
source_path = catchPath + '/src/catch2'
source_path = normalized_path(source_path)
include_parser = re.compile(r'#include <(catch2/.+\.hpp)>')
errors_found = False
def headers_in_folder(folder):
return glob(folder + '/*.hpp')
def folders_in_folder(folder):
return [x for x in os.scandir(folder) if x.is_dir()]
def collated_includes(folder):
base = headers_in_folder(folder)
for subfolder in folders_in_folder(folder):
if subfolder.name in internal_dirs:
base.extend(headers_in_folder(subfolder.path))
else:
base.append(subfolder.path + '/catch_{}_all.hpp'.format(subfolder.name))
return normalized_paths(sorted(base))
def includes_from_file(header):
includes = []
with open(header, 'r', encoding = 'utf-8') as file:
for line in file:
if not line.startswith('#include'):
continue
match = include_parser.match(line)
if match:
includes.append(match.group(1))
return normalized_paths(includes)
def normalize_includes(includes):
    """Returns the include paths relative to the src/ directory."""
    return [include[len(catchPath)+5:] for include in includes]
def get_duplicates(xs):
seen = set()
duplicated = []
for x in xs:
if x in seen:
duplicated.append(x)
seen.add(x)
return duplicated
def verify_convenience_header(folder):
"""
Performs the actual checking of convenience header for specific folder.
Checks that
1) The header even exists
2) That all includes in the header are sorted
3) That there are no duplicated includes
4) That all includes that should be in the header are actually present in the header
5) That there are no superfluous includes that should not be in the header
"""
global errors_found
path = normalized_path(folder.path)
assert path.startswith(source_path), '{} does not start with {}'.format(path, source_path)
stripped_path = path[len(source_path) + 1:]
path_pieces = stripped_path.split('/')
if path == source_path:
header_name = 'catch_all.hpp'
else:
header_name = 'catch_{}_all.hpp'.format('_'.join(path_pieces))
# 1) Does it exist?
full_path = path + '/' + header_name
if not os.path.isfile(full_path):
errors_found = True
print('Missing convenience header: {}'.format(full_path))
return
file_incs = includes_from_file(path + '/' + header_name)
    # 2) Are the includes sorted?
if sorted(file_incs) != file_incs:
errors_found = True
print("'{}': Includes are not in sorted order!".format(header_name))
# 3) Are there no duplicates?
duplicated = get_duplicates(file_incs)
for duplicate in duplicated:
errors_found = True
print("'{}': Duplicated include: '{}'".format(header_name, duplicate))
target_includes = normalize_includes(collated_includes(path))
# Avoid requiring the convenience header to include itself
target_includes = [x for x in target_includes if header_name not in x]
# 4) Are all required headers present?
file_incs_set = set(file_incs)
for include in target_includes:
if (include not in file_incs_set and
include != 'catch2/internal/catch_windows_h_proxy.hpp'):
errors_found = True
print("'{}': missing include '{}'".format(header_name, include))
# 5) Are there any superfluous headers?
desired_set = set(target_includes)
for include in file_incs:
if include not in desired_set:
errors_found = True
print("'{}': superfluous include '{}'".format(header_name, include))
def walk_source_folders(current):
verify_convenience_header(current)
for folder in folders_in_folder(current.path):
fname = folder.name
if fname not in internal_dirs:
walk_source_folders(folder)
# This is an ugly hack because we cannot instantiate DirEntry manually
base_dir = [x for x in os.scandir(catchPath + '/src') if x.name == 'catch2']
walk_source_folders(base_dir[0])
# Propagate error "code" upwards
if not errors_found:
print('Everything ok')
exit(errors_found)
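To illustrate checks 2) and 3) above, a self-contained sketch on a made-up include list:

includes = ['catch2/a.hpp', 'catch2/c.hpp', 'catch2/b.hpp', 'catch2/c.hpp']
print(sorted(includes) != includes)  # True -> "Includes are not in sorted order!"
seen, dups = set(), []
for inc in includes:
    if inc in seen:
        dups.append(inc)
    seen.add(inc)
print(dups)  # ['catch2/c.hpp'] -> reported as a duplicated include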

View File

@@ -0,0 +1,14 @@
#!/usr/bin/env python3
import os
import sys
files_set = set()
for root, dirs, files in os.walk("src/catch2"):
    for file in files:
        if file not in files_set:
            files_set.add(file)
        else:
            print("File %s is a duplicate" % file)
sys.exit(1)
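An equivalent formulation using collections.Counter, shown only as a sketch; unlike the script above it reports all duplicates instead of exiting on the first one:

import os
from collections import Counter
counts = Counter(f for _root, _dirs, files in os.walk('src/catch2') for f in files)
duplicates = [name for name, n in counts.items() if n > 1]
print(duplicates or 'no duplicate filenames')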

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python3
import sys
import glob
correct_licence = """\
// Copyright Catch2 Authors
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// SPDX-License-Identifier: BSL-1.0
"""
def check_licence_in_file(filename: str) -> bool:
with open(filename, 'r') as f:
file_preamble = ''.join(f.readlines()[:7])
if correct_licence != file_preamble:
print('File {} does not have proper licence'.format(filename))
return False
return True
def check_licences_in_path(path: str) -> int:
failed = 0
files_to_check = glob.glob(path + '/**/*.cpp', recursive=True) \
+ glob.glob(path + '/**/*.hpp', recursive=True)
for file in files_to_check:
if not check_licence_in_file(file):
failed += 1
return failed
def check_licences():
failed = 0
    # Add 'extras' after the amalgamated files are regenerated with the new script (past 3.4.0)
roots = ['src/catch2', 'tests', 'examples', 'fuzzing']
for root in roots:
failed += check_licences_in_path(root)
if failed:
print('{} files are missing licence'.format(failed))
sys.exit(1)
if __name__ == "__main__":
check_licences()
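check_licences_in_path relies on recursive globbing; a small standalone sketch, assuming it runs from the repository root as check_licences does:

import glob
files = glob.glob('src/catch2/**/*.cpp', recursive=True) \
      + glob.glob('src/catch2/**/*.hpp', recursive=True)
print(len(files), 'files would be licence-checked')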

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env python3
import releaseCommon
v = releaseCommon.Version()
v.incrementBuildNumber()
releaseCommon.performUpdates(v)
print( "Updated files to v{0}".format( v.getVersionString() ) )

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python3
#
# extractFeaturesFromReleaseNotes.py
#
# Read the release notes - docs/release-notes.md - and generate text
# for pasting in to individual documentation pages, to indicate which
# versions recent features were released in.
#
# Using the output of the file is easier than manually constructing
# the text to paste in to documentation pages.
#
# One way to use this:
# - run this script, saving the output to some temporary file
# - diff this output with the actual release notes page
# - the differences are Markdown text that can be pasted in to the
# appropriate documentation pages in the docs/ directory.
# - each release also has a GitHub link showing which files were changed
#   in it, which can be helpful for finding the documentation pages that
#   need the 'Introduced in Catch ...' snippets.
#
import re
def create_introduced_in_text(version, bug_number = None):
"""Generate text to paste in to documentation file"""
if bug_number:
return '> [Introduced](https://github.com/catchorg/Catch2/issues/%s) in Catch %s.' % (bug_number, version)
else:
# Use this text for changes that don't have issue numbers
return '> Introduced in Catch %s.' % version
def link_to_changes_in_release(release, releases):
"""
Markdown text for a hyperlink showing all edits in a release, or empty string
:param release: A release version, as a string
:param releases: A container of releases, in descending order - newest to oldest
    :return: Markdown text for a hyperlink showing the differences between the given release and the prior one,
or empty string, if the previous release is not known
"""
if release == releases[-1]:
# This is the earliest release we know about
return ''
index = releases.index(release)
previous_release = releases[index + 1]
return '\n[Changes in %s](https://github.com/catchorg/Catch2/compare/v%s...v%s)' % (release, previous_release, release)
def write_recent_release_notes_with_introduced_text():
current_version = None
    release_toc_regex = r'\[(\d+\.\d+\.\d+)\]\(#\d+\)<br>'
issue_number_regex = r'#[0-9]+'
releases = []
with open('../docs/release-notes.md') as release_notes:
for line in release_notes:
line = line[:-1]
print(line)
# Extract version number from table of contents
match = re.search(release_toc_regex, line)
if match:
release_name = match.group(1)
releases.append(release_name)
if line.startswith('## '):
# It's a section with version number
current_version = line.replace('## ', '')
# We decided not to add released-date info for older versions
if current_version == 'Older versions':
break
print(create_introduced_in_text(current_version))
print(link_to_changes_in_release(current_version, releases))
# Not yet found a version number, so to avoid picking up hyperlinks to
# version numbers in the index, keep going
if not current_version:
continue
for bug_link in re.findall(issue_number_regex, line):
bug_number = bug_link.replace('#', '')
print(create_introduced_in_text(current_version, bug_number))
if __name__ == '__main__':
write_recent_release_notes_with_introduced_text()
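Illustrative output of the snippet generation, with a made-up version and issue number; the helper below mirrors the formatting of create_introduced_in_text above:

def introduced(version, bug_number=None):
    if bug_number:
        return '> [Introduced](https://github.com/catchorg/Catch2/issues/%s) in Catch %s.' % (bug_number, version)
    return '> Introduced in Catch %s.' % version
print(introduced('3.5.0'))          # > Introduced in Catch 3.5.0.
print(introduced('3.5.0', '1234'))  # > [Introduced](https://github.com/catchorg/Catch2/issues/1234) in Catch 3.5.0.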

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
import os
from scriptCommon import catchPath
def isSourceFile( path ):
return path.endswith( ".cpp" ) or path.endswith( ".h" ) or path.endswith( ".hpp" )
def fixAllFilesInDir( dir ):
changedFiles = 0
for f in os.listdir( dir ):
path = os.path.join( dir,f )
if os.path.isfile( path ):
if isSourceFile( path ):
if fixFile( path ):
changedFiles += 1
        else:
            changedFiles += fixAllFilesInDir( path )
return changedFiles
def fixFile( path ):
f = open( path, 'r' )
lines = []
changed = 0
for line in f:
trimmed = line.rstrip() + "\n"
        trimmed = trimmed.replace('\t', '    ')
if trimmed != line:
changed = changed +1
lines.append( trimmed )
f.close()
    if changed > 0:
print( path + ":" )
print( " - fixed " + str(changed) + " line(s)" )
altPath = path + ".backup"
os.rename( path, altPath )
f2 = open( path, 'w' )
for line in lines:
f2.write( line )
f2.close()
os.remove( altPath )
return True
return False
changedFiles = fixAllFilesInDir(catchPath)
if changedFiles > 0:
print( "Fixed " + str(changedFiles) + " file(s)" )
else:
print( "No trailing whitespace found" )

View File

@@ -0,0 +1,139 @@
#!/usr/bin/env python3
# Copyright Catch2 Authors
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.boost.org/LICENSE_1_0.txt)
# SPDX-License-Identifier: BSL-1.0
import os
import re
import datetime
from scriptCommon import catchPath
from releaseCommon import Version
root_path = os.path.join(catchPath, 'src')
starting_header = os.path.join(root_path, 'catch2', 'catch_all.hpp')
output_header = os.path.join(catchPath, 'extras', 'catch_amalgamated.hpp')
output_cpp = os.path.join(catchPath, 'extras', 'catch_amalgamated.cpp')
# REUSE-IgnoreStart
# These are the copyright comments in each file, we want to ignore them
copyright_lines = [
'// Copyright Catch2 Authors\n',
'// Distributed under the Boost Software License, Version 1.0.\n',
'// (See accompanying file LICENSE.txt or copy at\n',
'// https://www.boost.org/LICENSE_1_0.txt)\n',
'// SPDX-License-Identifier: BSL-1.0\n',
]
# The header of the amalgamated file: copyright information + explanation
# what this file is.
file_header = '''\
// Copyright Catch2 Authors
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// SPDX-License-Identifier: BSL-1.0
// Catch v{version_string}
// Generated: {generation_time}
// ----------------------------------------------------------
// This file is an amalgamation of multiple different files.
// You probably shouldn't edit it directly.
// ----------------------------------------------------------
'''
# REUSE-IgnoreEnd
# Returns file header with proper version string and generation time
def formatted_file_header(version):
return file_header.format(version_string=version.getVersionString(),
generation_time=datetime.datetime.now())
# Which headers were already concatenated (and thus should not be
# processed again)
concatenated_headers = set()
internal_include_parser = re.compile(r'\s*#include <(catch2/.*)>.*')
def concatenate_file(out, filename: str, expand_headers: bool) -> int:
# Gathers statistics on how many headers were expanded
concatenated = 1
with open(filename, mode='r', encoding='utf-8') as input:
for line in input:
if line in copyright_lines:
continue
m = internal_include_parser.match(line)
# anything that isn't a Catch2 header can just be copied to
# the resulting file
if not m:
out.write(line)
continue
# TBD: We can also strip out include guards from our own
# headers, but it wasn't worth the time at the time of writing
# this script.
# We do not want to expand headers for the cpp file
# amalgamation but neither do we want to copy them to output
if not expand_headers:
continue
next_header = m.group(1)
            # We have to avoid re-expanding the same header over and
            # over again, or the result would end up with a couple
            # hundred thousand lines (~300k as of preview3 :-) )
if next_header in concatenated_headers:
continue
            # Skip including the auto-generated user config file,
            # because it has not been generated yet at this point.
            # The code around it should be written so that not including
            # it is equivalent to an all-default user configuration.
if next_header == 'catch2/catch_user_config.hpp':
concatenated_headers.add(next_header)
continue
concatenated_headers.add(next_header)
concatenated += concatenate_file(out, os.path.join(root_path, next_header), expand_headers)
return concatenated
def generate_header():
with open(output_header, mode='w', encoding='utf-8') as header:
header.write(formatted_file_header(Version()))
header.write('#ifndef CATCH_AMALGAMATED_HPP_INCLUDED\n')
header.write('#define CATCH_AMALGAMATED_HPP_INCLUDED\n')
print('Concatenated {} headers'.format(concatenate_file(header, starting_header, True)))
header.write('#endif // CATCH_AMALGAMATED_HPP_INCLUDED\n')
def generate_cpp():
from glob import glob
cpp_files = sorted(glob(os.path.join(root_path, 'catch2', '**/*.cpp'), recursive=True))
with open(output_cpp, mode='w', encoding='utf-8') as cpp:
cpp.write(formatted_file_header(Version()))
cpp.write('\n#include "catch_amalgamated.hpp"\n')
concatenate_file(cpp, os.path.join(root_path, 'catch2/internal/catch_windows_h_proxy.hpp'), False)
for file in cpp_files:
concatenate_file(cpp, file, False)
print('Concatenated {} cpp files'.format(len(cpp_files)))
if __name__ == "__main__":
generate_header()
generate_cpp()
# Notes:
# * For .cpp files, internal includes have to be stripped and rewritten
# * for .hpp files, internal includes have to be resolved and included
# * The .cpp file needs to start with `#include "catch_amalgamated.hpp"`
# * include guards can be left/stripped, doesn't matter
# * *.cpp files should be included sorted, to minimize diffs between versions
# * *.hpp files should also be somehow sorted -> use catch_all.hpp as the
#   entrypoint
# * allow disabling main in the .cpp amalgamation
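A quick sketch of how internal_include_parser above decides what gets expanded versus copied through:

import re
parser = re.compile(r'\s*#include <(catch2/.*)>.*')
m = parser.match('#include <catch2/catch_test_macros.hpp>')
print(m.group(1))                         # catch2/catch_test_macros.hpp -> expanded in place
print(parser.match('#include <vector>'))  # None -> copied to the output verbatim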

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env python3
import releaseCommon
v = releaseCommon.Version()
v.incrementMajorVersion()
releaseCommon.performUpdates(v)
print( "Updated files to v{0}".format( v.getVersionString() ) )

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env python3
import releaseCommon
v = releaseCommon.Version()
v.incrementMinorVersion()
releaseCommon.performUpdates(v)
print( "Updated files to v{0}".format( v.getVersionString() ) )

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env python3
import releaseCommon
v = releaseCommon.Version()
v.incrementPatchNumber()
releaseCommon.performUpdates(v)
print( "Updated files to v{0}".format( v.getVersionString() ) )

View File

@@ -0,0 +1,143 @@
import os
import re
import string
import fnmatch
from scriptCommon import catchPath
versionParser = re.compile( r'(\s*static\sVersion\sversion)\s*\(\s*(.*)\s*,\s*(.*)\s*,\s*(.*)\s*,\s*\"(.*)\"\s*,\s*(.*)\s*\).*' )
rootPath = os.path.join( catchPath, 'src/catch2' )
versionPath = os.path.join( rootPath, "catch_version.cpp" )
definePath = os.path.join(rootPath, 'catch_version_macros.hpp')
readmePath = os.path.join( catchPath, "README.md" )
cmakePath = os.path.join(catchPath, 'CMakeLists.txt')
mesonPath = os.path.join(catchPath, 'meson.build')
class Version:
def __init__(self):
f = open( versionPath, 'r' )
for line in f:
m = versionParser.match( line )
if m:
self.variableDecl = m.group(1)
self.majorVersion = int(m.group(2))
self.minorVersion = int(m.group(3))
self.patchNumber = int(m.group(4))
self.branchName = m.group(5)
self.buildNumber = int(m.group(6))
f.close()
def nonDevelopRelease(self):
if self.branchName != "":
self.branchName = ""
self.buildNumber = 0
def developBuild(self):
if self.branchName == "":
self.branchName = "develop"
self.buildNumber = 0
def incrementBuildNumber(self):
self.developBuild()
self.buildNumber = self.buildNumber+1
def incrementPatchNumber(self):
self.nonDevelopRelease()
self.patchNumber = self.patchNumber+1
def incrementMinorVersion(self):
self.nonDevelopRelease()
self.patchNumber = 0
self.minorVersion = self.minorVersion+1
def incrementMajorVersion(self):
self.nonDevelopRelease()
self.patchNumber = 0
self.minorVersion = 0
self.majorVersion = self.majorVersion+1
def getVersionString(self):
versionString = '{0}.{1}.{2}'.format( self.majorVersion, self.minorVersion, self.patchNumber )
if self.branchName != "":
versionString = versionString + '-{0}.{1}'.format( self.branchName, self.buildNumber )
return versionString
def updateVersionFile(self):
f = open( versionPath, 'r' )
lines = []
for line in f:
m = versionParser.match( line )
if m:
lines.append( '{0}( {1}, {2}, {3}, "{4}", {5} );'.format( self.variableDecl, self.majorVersion, self.minorVersion, self.patchNumber, self.branchName, self.buildNumber ) )
else:
lines.append( line.rstrip() )
f.close()
f = open( versionPath, 'w' )
for line in lines:
f.write( line + "\n" )
def updateCmakeFile(version):
with open(cmakePath, 'rb') as file:
lines = file.readlines()
replacementRegex = re.compile(b'''VERSION (\\d+.\\d+.\\d+) # CML version placeholder, don't delete''')
replacement = '''VERSION {0} # CML version placeholder, don't delete'''.format(version.getVersionString()).encode('ascii')
with open(cmakePath, 'wb') as file:
for line in lines:
file.write(replacementRegex.sub(replacement, line))
def updateMesonFile(version):
with open(mesonPath, 'rb') as file:
lines = file.readlines()
replacementRegex = re.compile(b'''version\\s*:\\s*'(\\d+.\\d+.\\d+)', # CML version placeholder, don't delete''')
replacement = '''version: '{0}', # CML version placeholder, don't delete'''.format(version.getVersionString()).encode('ascii')
with open(mesonPath, 'wb') as file:
for line in lines:
file.write(replacementRegex.sub(replacement, line))
def updateVersionDefine(version):
# First member of the tuple is the compiled regex object, the second is replacement if it matches
replacementRegexes = [(re.compile(b'#define CATCH_VERSION_MAJOR \\d+'),'#define CATCH_VERSION_MAJOR {}'.format(version.majorVersion).encode('ascii')),
(re.compile(b'#define CATCH_VERSION_MINOR \\d+'),'#define CATCH_VERSION_MINOR {}'.format(version.minorVersion).encode('ascii')),
(re.compile(b'#define CATCH_VERSION_PATCH \\d+'),'#define CATCH_VERSION_PATCH {}'.format(version.patchNumber).encode('ascii')),
]
with open(definePath, 'rb') as file:
lines = file.readlines()
with open(definePath, 'wb') as file:
for line in lines:
for replacement in replacementRegexes:
line = replacement[0].sub(replacement[1], line)
file.write(line)
def updateVersionPlaceholder(filename, version):
with open(filename, 'rb') as file:
lines = file.readlines()
placeholderRegex = re.compile(b'Catch[0-9]? X.Y.Z')
replacement = 'Catch2 {}.{}.{}'.format(version.majorVersion, version.minorVersion, version.patchNumber).encode('ascii')
with open(filename, 'wb') as file:
for line in lines:
file.write(placeholderRegex.sub(replacement, line))
def updateDocumentationVersionPlaceholders(version):
print('Updating version placeholder in documentation')
docsPath = os.path.join(catchPath, 'docs/')
for basePath, _, files in os.walk(docsPath):
for file in files:
if fnmatch.fnmatch(file, "*.md") and "contributing.md" != file:
updateVersionPlaceholder(os.path.join(basePath, file), version)
def performUpdates(version):
version.updateVersionFile()
updateVersionDefine(version)
import generateAmalgamatedFiles
generateAmalgamatedFiles.generate_header()
generateAmalgamatedFiles.generate_cpp()
updateCmakeFile(version)
updateMesonFile(version)
updateDocumentationVersionPlaceholders(version)
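For clarity, a sketch of what versionParser captures from the version line in catch_version.cpp; the sample line below is illustrative, not copied from the repository:

import re
versionParser = re.compile( r'(\s*static\sVersion\sversion)\s*\(\s*(.*)\s*,\s*(.*)\s*,\s*(.*)\s*,\s*\"(.*)\"\s*,\s*(.*)\s*\).*' )
m = versionParser.match('    static Version version( 3, 4, 0, "", 0 );')
print(m.group(2), m.group(3), m.group(4), repr(m.group(5)))
# 3 4 0 ''  -> major, minor, patch, branch name (build number is group 6)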

View File

@@ -0,0 +1,4 @@
import os
import sys
catchPath = os.path.dirname(os.path.dirname(os.path.realpath( os.path.dirname(sys.argv[0]))))
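A worked example of the path arithmetic, with a made-up checkout location: if a script is invoked as /repo/tools/scripts/approve.py, os.path.dirname(sys.argv[0]) is /repo/tools/scripts, and the two outer dirname calls walk up to the checkout root:

import os
script_dir = '/repo/tools/scripts'
print(os.path.dirname(os.path.dirname(script_dir)))  # /repo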

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env python3
from scriptCommon import catchPath
import os
import subprocess
# ---------------------------------------------------
# Update code examples
# ---------------------------------------------------
# For info on mdsnippets, see https://github.com/SimonCropp/MarkdownSnippets
# install dotnet SDK from http://go.microsoft.com/fwlink/?LinkID=798306&clcid=0x409
# Then install MarkdownSnippets.Tool with
# dotnet tool install -g MarkdownSnippets.Tool
# To update:
# dotnet tool update -g MarkdownSnippets.Tool
# To uninstall (e.g. to downgrade to a lower version)
# dotnet tool uninstall -g MarkdownSnippets.Tool
os.chdir(catchPath)
subprocess.run('dotnet tool update -g MarkdownSnippets.Tool --version 21.2.0', shell=True, check=True)
subprocess.run('mdsnippets', shell=True, check=True)

View File

@@ -0,0 +1,447 @@
#!/usr/bin/env python3
#
# updateDocumentToC.py
#
# Insert table of contents at top of Catch markdown documents.
#
# This script is distributed under the GNU General Public License v3.0
#
# It is based on markdown-toclify version 1.7.1 by Sebastian Raschka,
# https://github.com/rasbt/markdown-toclify
#
import argparse
import glob
import os
import re
import sys
from scriptCommon import catchPath
# Configuration:
minTocEntries = 4
headingExcludeDefault = [1,3,4,5] # use level 2 headers by default
headingExcludeRelease = [2,3,4,5] # use level 1 headers for release-notes.md
documentsDefault = os.path.join(os.path.relpath(catchPath), 'docs/*.md')
releaseNotesName = 'release-notes.md'
contentTitle = '**Contents**'
contentLineNo = 4
contentLineNdx = contentLineNo - 1
# End configuration
VALIDS = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-&'
def readLines(in_file):
"""Returns a list of lines from a input markdown file."""
with open(in_file, 'r') as inf:
in_contents = inf.read().split('\n')
return in_contents
def removeLines(lines, remove=('[[back to top]', '<a class="mk-toclify"')):
"""Removes existing [back to top] links and <a id> tags."""
if not remove:
return lines[:]
out = []
for l in lines:
if l.startswith(remove):
continue
out.append(l)
return out
def removeToC(lines):
"""Removes existing table of contents starting at index contentLineNdx."""
    if not lines[contentLineNdx].startswith(contentTitle):
return lines[:]
result_top = lines[:contentLineNdx]
pos = contentLineNdx + 1
while lines[pos].startswith('['):
pos = pos + 1
result_bottom = lines[pos + 1:]
return result_top + result_bottom
def dashifyHeadline(line):
"""
Takes a header line from a Markdown document and
    returns a list of the
    '#'-stripped version of the head line,
    a string version for <a id=''></a> anchor tags,
    and the level of the headline as integer.
    E.g.,
    >>> dashifyHeadline('### some header lvl3')
    ['some header lvl3', 'some-header-lvl3', 3]
"""
stripped_right = line.rstrip('#')
stripped_both = stripped_right.lstrip('#')
level = len(stripped_right) - len(stripped_both)
stripped_wspace = stripped_both.strip()
# GitHub's sluggification works in an interesting way
# 1) '+', '/', '(', ')' and so on are just removed
# 2) spaces are converted into '-' directly
# 3) multiple -- are not collapsed
dashified = ''
for c in stripped_wspace:
if c in VALIDS:
dashified += c.lower()
elif c.isspace():
dashified += '-'
else:
# Unknown symbols are just removed
continue
return [stripped_wspace, dashified, level]
def tagAndCollect(lines, id_tag=True, back_links=False, exclude_h=None):
"""
Gets headlines from the markdown document and creates anchor tags.
Keyword arguments:
lines: a list of sublists where every sublist
represents a line from a Markdown document.
        id_tag: if true, inserts <a id> anchor tags (not required by GitHub)
back_links: if true, adds "back to top" links below each headline
exclude_h: header levels to exclude. E.g., [2, 3]
excludes level 2 and 3 headings.
Returns a tuple of 2 lists:
1st list:
        A modified version of the input list where
        <a id="some-header"></a> anchor tags were inserted
        above the header lines (when id_tag is set).
2nd list:
        A list of 3-value sublists, where the first value
        represents the heading, the second value is the string
        assigned to the id attribute of the anchor tag,
        and the third value is an integer for the headline level.
E.g.,
[['some header lvl3', 'some-header-lvl3', 3], ...]
"""
out_contents = []
headlines = []
for l in lines:
saw_headline = False
orig_len = len(l)
l_stripped = l.lstrip()
if l_stripped.startswith(('# ', '## ', '### ', '#### ', '##### ', '###### ')):
# comply with new markdown standards
# not a headline if '#' not followed by whitespace '##no-header':
if not l.lstrip('#').startswith(' '):
continue
# not a headline if more than 6 '#':
if len(l) - len(l.lstrip('#')) > 6:
continue
# headers can be indented by at most 3 spaces:
if orig_len - len(l_stripped) > 3:
continue
# ignore empty headers
if not set(l) - {'#', ' '}:
continue
saw_headline = True
dashified = dashifyHeadline(l)
if not exclude_h or not dashified[-1] in exclude_h:
if id_tag:
id_tag = '<a class="mk-toclify" id="%s"></a>'\
% (dashified[1])
out_contents.append(id_tag)
headlines.append(dashified)
out_contents.append(l)
if back_links and saw_headline:
out_contents.append('[[back to top](#table-of-contents)]')
return out_contents, headlines
def positioningHeadlines(headlines):
"""
    Decreases all heading levels by one if there is no level-1 heading, so the TOC starts left-aligned
"""
left_just = False
for row in headlines:
if row[-1] == 1:
left_just = True
break
if not left_just:
for row in headlines:
row[-1] -= 1
return headlines
def createToc(headlines, hyperlink=True, top_link=False, no_toc_header=False):
"""
Creates the table of contents from the headline list
that was returned by the tagAndCollect function.
Keyword Arguments:
headlines: list of lists
e.g., ['Some header lvl3', 'some-header-lvl3', 3]
hyperlink: Creates hyperlinks in Markdown format if True,
e.g., '- [Some header lvl1](#some-header-lvl1)'
top_link: if True, add a id tag for linking the table
of contents itself (for the back-to-top-links)
no_toc_header: suppresses TOC header if True.
Returns a list of headlines for a table of contents
in Markdown format,
e.g., [' - [Some header lvl3](#some-header-lvl3)', ...]
"""
processed = []
if not no_toc_header:
if top_link:
processed.append('<a class="mk-toclify" id="table-of-contents"></a>\n')
processed.append(contentTitle + '<br>')
for line in headlines:
if hyperlink:
item = '[%s](#%s)' % (line[0], line[1])
else:
item = '%s- %s' % ((line[2]-1)*' ', line[0])
processed.append(item + '<br>')
processed.append('\n')
return processed
def buildMarkdown(toc_headlines, body, spacer=0, placeholder=None):
"""
Returns a string with the Markdown output contents incl.
the table of contents.
Keyword arguments:
toc_headlines: lines for the table of contents
as created by the createToc function.
body: contents of the Markdown file including
ID-anchor tags as returned by the
tagAndCollect function.
spacer: Adds vertical space after the table
of contents. Height in pixels.
placeholder: If a placeholder string is provided, the placeholder
will be replaced by the TOC instead of inserting the TOC at
the top of the document
"""
if spacer:
spacer_line = ['\n<div style="height:%spx;"></div>\n' % (spacer)]
toc_markdown = "\n".join(toc_headlines + spacer_line)
else:
toc_markdown = "\n".join(toc_headlines)
if placeholder:
body_markdown = "\n".join(body)
markdown = body_markdown.replace(placeholder, toc_markdown)
else:
        body_markdown_p1 = "\n".join(body[:contentLineNdx]) + '\n'
        body_markdown_p2 = "\n".join(body[contentLineNdx:])
markdown = body_markdown_p1 + toc_markdown + body_markdown_p2
return markdown
def outputMarkdown(markdown_cont, output_file):
"""
    Writes to an output file if `output_file` is a valid path.
"""
if output_file:
with open(output_file, 'w') as out:
out.write(markdown_cont)
def markdownToclify(
input_file,
output_file=None,
min_toc_len=2,
github=False,
back_to_top=False,
nolink=False,
no_toc_header=False,
spacer=0,
placeholder=None,
exclude_h=None):
""" Function to add table of contents to markdown files.
Parameters
-----------
input_file: str
Path to the markdown input file.
output_file: str (default: None)
Path to the markdown output file.
min_toc_len: int (default: 2)
Minimum number of entries to create a table of contents for.
github: bool (default: False)
Uses GitHub TOC syntax if True.
back_to_top: bool (default: False)
Inserts back-to-top links below headings if True.
nolink: bool (default: False)
Creates the table of contents without internal links if True.
no_toc_header: bool (default: False)
Suppresses the Table of Contents header if True
spacer: int (default: 0)
Inserts horizontal space (in pixels) after the table of contents.
placeholder: str (default: None)
Inserts the TOC at the placeholder string instead
of inserting the TOC at the top of the document.
exclude_h: list (default None)
Excludes header levels, e.g., if [2, 3], ignores header
levels 2 and 3 in the TOC.
Returns
-----------
changed: Boolean
True if the file has been updated, False otherwise.
"""
cleaned_contents = removeLines(
removeToC(readLines(input_file)),
remove=('[[back to top]', '<a class="mk-toclify"'))
processed_contents, raw_headlines = tagAndCollect(
cleaned_contents,
id_tag=not github,
back_links=back_to_top,
exclude_h=exclude_h)
# add table of contents?
if len(raw_headlines) < min_toc_len:
processed_headlines = []
else:
leftjustified_headlines = positioningHeadlines(raw_headlines)
processed_headlines = createToc(
leftjustified_headlines,
hyperlink=not nolink,
top_link=not nolink and not github,
no_toc_header=no_toc_header)
if nolink:
processed_contents = cleaned_contents
cont = buildMarkdown(
toc_headlines=processed_headlines,
body=processed_contents,
spacer=spacer,
placeholder=placeholder)
if output_file:
outputMarkdown(cont, output_file)
def isReleaseNotes(f):
return os.path.basename(f) == releaseNotesName
def excludeHeadingsFor(f):
return headingExcludeRelease if isReleaseNotes(f) else headingExcludeDefault
def updateSingleDocumentToC(input_file, min_toc_len, verbose=False):
"""Add or update table of contents in specified file. Return 1 if file changed, 0 otherwise."""
    if verbose:
print( 'file: {}'.format(input_file))
output_file = input_file + '.tmp'
markdownToclify(
input_file=input_file,
output_file=output_file,
min_toc_len=min_toc_len,
github=True,
back_to_top=False,
nolink=False,
no_toc_header=False,
        spacer=0,
        placeholder=None,
exclude_h=excludeHeadingsFor(input_file))
# prevent race-condition (Python 3.3):
if sys.version_info >= (3, 3):
os.replace(output_file, input_file)
else:
os.remove(input_file)
os.rename(output_file, input_file)
return 1
def updateDocumentToC(paths, min_toc_len, verbose):
"""Add or update table of contents to specified paths. Return number of changed files"""
n = 0
for g in paths:
for f in glob.glob(g):
if os.path.isfile(f):
n = n + updateSingleDocumentToC(input_file=f, min_toc_len=min_toc_len, verbose=verbose)
return n
def updateDocumentToCMain():
"""Add or update table of contents to specified paths."""
parser = argparse.ArgumentParser(
description='Add or update table of contents in markdown documents.',
epilog="""""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'Input',
metavar='file',
type=str,
nargs=argparse.REMAINDER,
help='files to process, at default: docs/*.md')
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='report the name of the file being processed')
parser.add_argument(
'--min-toc-entries',
dest='minTocEntries',
default=minTocEntries,
type=int,
metavar='N',
help='the minimum number of entries to create a table of contents for [{default}]'.format(default=minTocEntries))
parser.add_argument(
'--remove-toc',
action='store_const',
dest='minTocEntries',
const=99,
help='remove all tables of contents')
args = parser.parse_args()
paths = args.Input if args.Input else [documentsDefault]
changedFiles = updateDocumentToC(paths=paths, min_toc_len=args.minTocEntries, verbose=args.verbose)
if changedFiles > 0:
print( "Processed table of contents in " + str(changedFiles) + " file(s)" )
else:
print( "No table of contents added or updated" )
if __name__ == '__main__':
updateDocumentToCMain()
# end of file
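To make GitHub's sluggification rules (as implemented in dashifyHeadline above) concrete, here is a compact standalone re-statement of its character handling, using the same VALIDS set as the script:

VALIDS = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-&'
def slug(text):
    # keep valid chars lowercased, turn whitespace into '-', drop the rest
    return ''.join(c.lower() if c in VALIDS else '-' if c.isspace() else '' for c in text)
print(slug('Some header (lvl3) + extras'))  # some-header-lvl3--extras, note the uncollapsed '--'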