Merge branch 'master' into master
commit 1577a34c83
.github/workflows/cmake.yml | 18 (new file, vendored)
@@ -0,0 +1,18 @@
+name: cmake
+on: [check_run, push, pull_request]
+jobs:
+  cmake-publish:
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+
+    steps:
+      - name: checkout project
+        uses: actions/checkout@v4
+
+      - name: build project
+        uses: threeal/cmake-action@v2.0.0
.github/workflows/meson.yml | 36 (vendored)
@@ -1,9 +1,9 @@
 name: meson build and test
 run-name: update pushed to ${{ github.ref }}
-on: [check_run, pull_request, push]
+on: [check_run, push, pull_request]
 
 jobs:
-  publish:
+  meson-publish:
     runs-on: ${{ matrix.os }}
 
     strategy:
@@ -31,3 +31,35 @@ jobs:
           meson-version: 1.5.1
           ninja-version: 1.11.1.1
           action: test
+
+  meson-coverage:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: checkout repository
+        uses: actions/checkout@v4
+
+      - name: setup python
+        uses: actions/setup-python@v5
+
+      - name: meson build
+        uses: BSFishy/meson-build@v1.0.3
+        with:
+          meson-version: 1.5.1
+          ninja-version: 1.11.1.1
+          setup-options: -Db_coverage=true
+          action: build
+
+      - name: meson test
+        uses: BSFishy/meson-build@v1.0.3
+        with:
+          meson-version: 1.5.1
+          ninja-version: 1.11.1.1
+          setup-options: -Db_coverage=true
+          action: test
+
+      - name: generate code coverage report
+        uses: threeal/gcovr-action@v1.0.0
+        with:
+          coveralls-send: true
+          github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -54,16 +54,6 @@ endif()
 
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
 
-# ---------------------------------------------------------------------------
-# use ccache if found, has to be done before project()
-# ---------------------------------------------------------------------------
-find_program(CCACHE_EXECUTABLE "ccache" HINTS /usr/local/bin /opt/local/bin)
-if(CCACHE_EXECUTABLE)
-    message(STATUS "use ccache")
-    set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_EXECUTABLE}" CACHE PATH "ccache" FORCE)
-    set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_EXECUTABLE}" CACHE PATH "ccache" FORCE)
-endif()
-
 project(jsoncpp
         # Note: version must be updated in three places when doing a release. This
         # annoying process ensures that amalgamate, CMake, and meson all report the
@@ -103,7 +93,9 @@ if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
     set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin" CACHE PATH "Executable/dll output dir.")
 endif()
 
 set(JSONCPP_USE_SECURE_MEMORY "0" CACHE STRING "-D...=1 to use memory-wiping allocator for STL")
+if(JSONCPP_USE_SECURE_MEMORY)
+    add_definitions("-DJSONCPP_USE_SECURE_MEMORY=1")
+endif()
 
 configure_file("${PROJECT_SOURCE_DIR}/version.in"
                "${PROJECT_BINARY_DIR}/version"
SECURITY.md | 17 (new file)
@@ -0,0 +1,17 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+Please submit the report by filling out
+[this form](https://github.com/open-source-parsers/jsoncpp/security/advisories/new).
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+This project is maintained by volunteers on a reasonable-effort basis. As such,
+we ask that you give us 90 days to work on a fix before public exposure.
@@ -63,7 +63,7 @@ def amalgamate_source(source_top_dir=None,
     """
     print("Amalgamating header...")
     header = AmalgamationFile(source_top_dir)
-    header.add_text("/// Json-cpp amalgamated header (http://jsoncpp.sourceforge.net/).")
+    header.add_text("/// Json-cpp amalgamated header (https://github.com/open-source-parsers/jsoncpp/).")
     header.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
     header.add_file("LICENSE", wrap_in_comment=True)
     header.add_text("#ifndef JSON_AMALGAMATED_H_INCLUDED")
@@ -90,7 +90,7 @@ def amalgamate_source(source_top_dir=None,
     forward_header_include_path = base + "-forwards" + ext
     print("Amalgamating forward header...")
     header = AmalgamationFile(source_top_dir)
-    header.add_text("/// Json-cpp amalgamated forward header (http://jsoncpp.sourceforge.net/).")
+    header.add_text("/// Json-cpp amalgamated forward header (https://github.com/open-source-parsers/jsoncpp/).")
     header.add_text('/// It is intended to be used with #include "%s"' % forward_header_include_path)
     header.add_text("/// This header provides forward declaration for all JsonCpp types.")
     header.add_file("LICENSE", wrap_in_comment=True)
@@ -112,7 +112,7 @@ def amalgamate_source(source_top_dir=None,
 
     print("Amalgamating source...")
     source = AmalgamationFile(source_top_dir)
-    source.add_text("/// Json-cpp amalgamated source (http://jsoncpp.sourceforge.net/).")
+    source.add_text("/// Json-cpp amalgamated source (https://github.com/open-source-parsers/jsoncpp/).")
     source.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
     source.add_file("LICENSE", wrap_in_comment=True)
     source.add_text("")
@@ -25,7 +25,7 @@ int main() {
   const std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
   if (!reader->parse(rawJson.c_str(), rawJson.c_str() + rawJsonLength, &root,
                      &err)) {
-    std::cout << "error" << std::endl;
+    std::cout << "error: " << err << std::endl;
     return EXIT_FAILURE;
   }
 }
@@ -2,8 +2,8 @@
 # This function will prevent in-source builds
 function(AssureOutOfSourceBuilds)
   # make sure the user doesn't play dirty with symlinks
-  get_filename_component(srcdir "${CMAKE_SOURCE_DIR}" REALPATH)
-  get_filename_component(bindir "${CMAKE_BINARY_DIR}" REALPATH)
+  get_filename_component(srcdir "${CMAKE_CURRENT_SOURCE_DIR}" REALPATH)
+  get_filename_component(bindir "${CMAKE_CURRENT_BINARY_DIR}" REALPATH)
 
   # disallow in-source builds
   if("${srcdir}" STREQUAL "${bindir}")
@@ -190,6 +190,7 @@ private:
   using Errors = std::deque<ErrorInfo>;
 
   bool readToken(Token& token);
+  bool readTokenSkippingComments(Token& token);
   void skipSpaces();
   bool match(const Char* pattern, int patternLength);
   bool readComment();
@@ -221,7 +222,6 @@ private:
                                 int& column) const;
   String getLocationLineAndColumn(Location location) const;
   void addComment(Location begin, Location end, CommentPlacement placement);
-  void skipCommentTokens(Token& token);
 
   static bool containsNewLine(Location begin, Location end);
   static String normalizeEOL(Location begin, Location end);
@@ -244,6 +244,12 @@
  */
 class JSON_API CharReader {
 public:
+  struct JSON_API StructuredError {
+    ptrdiff_t offset_start;
+    ptrdiff_t offset_limit;
+    String message;
+  };
+
   virtual ~CharReader() = default;
   /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
    * document. The document must be a UTF-8 encoded string containing the
@@ -262,7 +268,12 @@ public:
   * error occurred.
   */
   virtual bool parse(char const* beginDoc, char const* endDoc, Value* root,
-                     String* errs) = 0;
+                     String* errs);
+
+  /** \brief Returns a vector of structured errors encountered while parsing.
+   * Each parse call resets the stored list of errors.
+   */
+  std::vector<StructuredError> getStructuredErrors() const;
 
   class JSON_API Factory {
   public:
@@ -272,6 +283,20 @@ public:
    */
    virtual CharReader* newCharReader() const = 0;
  }; // Factory
+
+protected:
+  class Impl {
+  public:
+    virtual ~Impl() = default;
+    virtual bool parse(char const* beginDoc, char const* endDoc, Value* root,
+                       String* errs) = 0;
+    virtual std::vector<StructuredError> getStructuredErrors() const = 0;
+  };
+
+  explicit CharReader(std::unique_ptr<Impl> impl) : _impl(std::move(impl)) {}
+
+private:
+  std::unique_ptr<Impl> _impl;
 }; // CharReader
 
 /** \brief Build a CharReader implementation.
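The StructuredError record and getStructuredErrors() accessor shown above are now part of the public CharReader interface, so byte-offset error reporting no longer requires the library-internal reader type. A minimal usage sketch (not part of this commit; it assumes the usual <json/json.h> amalgamated include):

    #include <json/json.h>
    #include <iostream>
    #include <memory>
    #include <string>

    int main() {
      const std::string doc = "{ 1 : 2 }"; // invalid: object key is not a string
      Json::CharReaderBuilder builder;
      const std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
      Json::Value root;
      Json::String errs;
      if (!reader->parse(doc.data(), doc.data() + doc.size(), &root, &errs)) {
        // Each parse call resets the stored list of errors.
        for (const Json::CharReader::StructuredError& e : reader->getStructuredErrors()) {
          std::cout << "bytes " << e.offset_start << ".." << e.offset_limit
                    << ": " << e.message << "\n";
        }
      }
      return 0;
    }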
@@ -18,10 +18,9 @@
   ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | \
    (JSONCPP_VERSION_PATCH << 8))
 
-#ifdef JSONCPP_USING_SECURE_MEMORY
-#undef JSONCPP_USING_SECURE_MEMORY
-#endif
+#if !defined(JSONCPP_USE_SECURE_MEMORY)
 #define JSONCPP_USING_SECURE_MEMORY 0
+#endif
 // If non-zero, the library zeroes any memory that it has allocated before
 // it frees its memory.
 
@@ -351,6 +351,7 @@ String JSON_API valueToString(
     PrecisionType precisionType = PrecisionType::significantDigits);
 String JSON_API valueToString(bool value);
 String JSON_API valueToQuotedString(const char* value);
+String JSON_API valueToQuotedString(const char* value, size_t length);
 
 /// \brief Output using the StyledStreamWriter.
 /// \see Json::operator>>()
@@ -1,3 +1,4 @@
+if (NOT TARGET JsonCpp::JsonCpp)
 if (TARGET jsoncpp_static)
     add_library(JsonCpp::JsonCpp INTERFACE IMPORTED)
     set_target_properties(JsonCpp::JsonCpp PROPERTIES INTERFACE_LINK_LIBRARIES "jsoncpp_static")
@@ -5,3 +6,4 @@ elseif (TARGET jsoncpp_lib)
     add_library(JsonCpp::JsonCpp INTERFACE IMPORTED)
     set_target_properties(JsonCpp::JsonCpp PROPERTIES INTERFACE_LINK_LIBRARIES "jsoncpp_lib")
 endif ()
+endif ()
@@ -1,5 +1,5 @@
 cmake_policy(PUSH)
-cmake_policy(VERSION 3.0)
+cmake_policy(VERSION 3.0...3.26)
 
 @PACKAGE_INIT@
 
@@ -73,7 +73,7 @@ if meson.is_subproject() or not get_option('tests')
   subdir_done()
 endif
 
-python = import('python').find_installation()
+python = find_program('python3')
 
 jsoncpp_test = executable(
   'jsoncpp_test', files([
@@ -240,11 +240,14 @@ static int parseCommandLine(int argc, const char* argv[], Options* opts) {
     return printUsage(argv);
   }
   int index = 1;
-  if (Json::String(argv[index]) == "--json-checker") {
-    opts->features = Json::Features::strictMode();
+  if (Json::String(argv[index]) == "--parse-only") {
     opts->parseOnly = true;
     ++index;
   }
+  if (Json::String(argv[index]) == "--strict") {
+    opts->features = Json::Features::strictMode();
+    ++index;
+  }
   if (Json::String(argv[index]) == "--json-config") {
     printConfig();
     return 3;
@@ -144,7 +144,7 @@ if(BUILD_STATIC_LIBS)
 
     # avoid name clashes on windows as the shared import lib is also named jsoncpp.lib
     if(NOT DEFINED STATIC_SUFFIX AND BUILD_SHARED_LIBS)
-        if (MSVC)
+        if (WIN32)
            set(STATIC_SUFFIX "_static")
        else()
            set(STATIC_SUFFIX "")
@@ -129,7 +129,7 @@ bool Reader::parse(const char* beginDoc, const char* endDoc, Value& root,
 
   bool successful = readValue();
   Token token;
-  skipCommentTokens(token);
+  readTokenSkippingComments(token);
   if (collectComments_ && !commentsBefore_.empty())
     root.setComment(commentsBefore_, commentAfter);
   if (features_.strictRoot_) {
@@ -157,7 +157,7 @@ bool Reader::readValue() {
     throwRuntimeError("Exceeded stackLimit in readValue().");
 
   Token token;
-  skipCommentTokens(token);
+  readTokenSkippingComments(token);
   bool successful = true;
 
   if (collectComments_ && !commentsBefore_.empty()) {
@@ -225,15 +225,15 @@ bool Reader::readValue() {
   return successful;
 }
 
-void Reader::skipCommentTokens(Token& token) {
+bool Reader::readTokenSkippingComments(Token& token) {
+  bool success = readToken(token);
   if (features_.allowComments_) {
-    do {
-      readToken(token);
-    } while (token.type_ == tokenComment);
-  } else {
-    readToken(token);
+    while (success && token.type_ == tokenComment) {
+      success = readToken(token);
+    }
   }
+  return success;
 }
 
 bool Reader::readToken(Token& token) {
   skipSpaces();
@@ -446,12 +446,7 @@ bool Reader::readObject(Token& token) {
   Value init(objectValue);
   currentValue().swapPayload(init);
   currentValue().setOffsetStart(token.start_ - begin_);
-  while (readToken(tokenName)) {
-    bool initialTokenOk = true;
-    while (tokenName.type_ == tokenComment && initialTokenOk)
-      initialTokenOk = readToken(tokenName);
-    if (!initialTokenOk)
-      break;
+  while (readTokenSkippingComments(tokenName)) {
     if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
       return true;
     name.clear();
@@ -480,15 +475,11 @@ bool Reader::readObject(Token& token) {
       return recoverFromError(tokenObjectEnd);
 
     Token comma;
-    if (!readToken(comma) ||
-        (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
-         comma.type_ != tokenComment)) {
+    if (!readTokenSkippingComments(comma) ||
+        (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator)) {
       return addErrorAndRecover("Missing ',' or '}' in object declaration",
                                 comma, tokenObjectEnd);
     }
-    bool finalizeTokenOk = true;
-    while (comma.type_ == tokenComment && finalizeTokenOk)
-      finalizeTokenOk = readToken(comma);
     if (comma.type_ == tokenObjectEnd)
       return true;
   }
@@ -518,10 +509,7 @@ bool Reader::readArray(Token& token) {
 
     Token currentToken;
     // Accept Comment after last item in the array.
-    ok = readToken(currentToken);
-    while (currentToken.type_ == tokenComment && ok) {
-      ok = readToken(currentToken);
-    }
+    ok = readTokenSkippingComments(currentToken);
     bool badTokenType = (currentToken.type_ != tokenArraySeparator &&
                          currentToken.type_ != tokenArrayEnd);
     if (!ok || badTokenType) {
@@ -599,8 +587,7 @@ bool Reader::decodeDouble(Token& token) {
 
 bool Reader::decodeDouble(Token& token, Value& decoded) {
   double value = 0;
-  String buffer(token.start_, token.end_);
-  IStringStream is(buffer);
+  IStringStream is(String(token.start_, token.end_));
   if (!(is >> value)) {
     if (value == std::numeric_limits<double>::max())
       value = std::numeric_limits<double>::infinity();
@@ -773,7 +760,7 @@ void Reader::getLocationLineAndColumn(Location location, int& line,
   while (current < location && current != end_) {
     Char c = *current++;
     if (c == '\r') {
-      if (*current == '\n')
+      if (current != end_ && *current == '\n')
         ++current;
       lastLineStart = current;
       ++line;
@@ -890,17 +877,12 @@ class OurReader {
 public:
   using Char = char;
   using Location = const Char*;
-  struct StructuredError {
-    ptrdiff_t offset_start;
-    ptrdiff_t offset_limit;
-    String message;
-  };
 
   explicit OurReader(OurFeatures const& features);
   bool parse(const char* beginDoc, const char* endDoc, Value& root,
              bool collectComments = true);
   String getFormattedErrorMessages() const;
-  std::vector<StructuredError> getStructuredErrors() const;
+  std::vector<CharReader::StructuredError> getStructuredErrors() const;
 
 private:
   OurReader(OurReader const&); // no impl
@@ -943,6 +925,7 @@ private:
   using Errors = std::deque<ErrorInfo>;
 
   bool readToken(Token& token);
+  bool readTokenSkippingComments(Token& token);
   void skipSpaces();
   void skipBom(bool skipBom);
   bool match(const Char* pattern, int patternLength);
@@ -976,7 +959,6 @@ private:
                                 int& column) const;
   String getLocationLineAndColumn(Location location) const;
   void addComment(Location begin, Location end, CommentPlacement placement);
-  void skipCommentTokens(Token& token);
 
   static String normalizeEOL(Location begin, Location end);
   static bool containsNewLine(Location begin, Location end);
@@ -1030,7 +1012,7 @@ bool OurReader::parse(const char* beginDoc, const char* endDoc, Value& root,
   bool successful = readValue();
   nodes_.pop();
   Token token;
-  skipCommentTokens(token);
+  readTokenSkippingComments(token);
   if (features_.failIfExtra_ && (token.type_ != tokenEndOfStream)) {
     addError("Extra non-whitespace after JSON value.", token);
     return false;
@@ -1058,7 +1040,7 @@ bool OurReader::readValue() {
   if (nodes_.size() > features_.stackLimit_)
     throwRuntimeError("Exceeded stackLimit in readValue().");
   Token token;
-  skipCommentTokens(token);
+  readTokenSkippingComments(token);
   bool successful = true;
 
   if (collectComments_ && !commentsBefore_.empty()) {
@@ -1145,15 +1127,15 @@ bool OurReader::readValue() {
   return successful;
 }
 
-void OurReader::skipCommentTokens(Token& token) {
+bool OurReader::readTokenSkippingComments(Token& token) {
+  bool success = readToken(token);
   if (features_.allowComments_) {
-    do {
-      readToken(token);
-    } while (token.type_ == tokenComment);
-  } else {
-    readToken(token);
+    while (success && token.type_ == tokenComment) {
+      success = readToken(token);
+    }
   }
+  return success;
 }
 
 bool OurReader::readToken(Token& token) {
   skipSpaces();
@@ -1449,12 +1431,7 @@ bool OurReader::readObject(Token& token) {
   Value init(objectValue);
   currentValue().swapPayload(init);
   currentValue().setOffsetStart(token.start_ - begin_);
-  while (readToken(tokenName)) {
-    bool initialTokenOk = true;
-    while (tokenName.type_ == tokenComment && initialTokenOk)
-      initialTokenOk = readToken(tokenName);
-    if (!initialTokenOk)
-      break;
+  while (readTokenSkippingComments(tokenName)) {
     if (tokenName.type_ == tokenObjectEnd &&
         (name.empty() ||
          features_.allowTrailingCommas_)) // empty object or trailing comma
@@ -1491,15 +1468,11 @@ bool OurReader::readObject(Token& token) {
      return recoverFromError(tokenObjectEnd);
 
    Token comma;
-    if (!readToken(comma) ||
-        (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
-         comma.type_ != tokenComment)) {
+    if (!readTokenSkippingComments(comma) ||
+        (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator)) {
      return addErrorAndRecover("Missing ',' or '}' in object declaration",
                                comma, tokenObjectEnd);
    }
-    bool finalizeTokenOk = true;
-    while (comma.type_ == tokenComment && finalizeTokenOk)
-      finalizeTokenOk = readToken(comma);
    if (comma.type_ == tokenObjectEnd)
      return true;
  }
@@ -1533,10 +1506,7 @@ bool OurReader::readArray(Token& token) {
 
    Token currentToken;
    // Accept Comment after last item in the array.
-    ok = readToken(currentToken);
-    while (currentToken.type_ == tokenComment && ok) {
-      ok = readToken(currentToken);
-    }
+    ok = readTokenSkippingComments(currentToken);
    bool badTokenType = (currentToken.type_ != tokenArraySeparator &&
                         currentToken.type_ != tokenArrayEnd);
    if (!ok || badTokenType) {
@@ -1651,8 +1621,7 @@ bool OurReader::decodeDouble(Token& token) {
 
 bool OurReader::decodeDouble(Token& token, Value& decoded) {
   double value = 0;
-  const String buffer(token.start_, token.end_);
-  IStringStream is(buffer);
+  IStringStream is(String(token.start_, token.end_));
   if (!(is >> value)) {
     if (value == std::numeric_limits<double>::max())
       value = std::numeric_limits<double>::infinity();
@@ -1825,7 +1794,7 @@ void OurReader::getLocationLineAndColumn(Location location, int& line,
   while (current < location && current != end_) {
     Char c = *current++;
     if (c == '\r') {
-      if (*current == '\n')
+      if (current != end_ && *current == '\n')
         ++current;
       lastLineStart = current;
       ++line;
@@ -1860,10 +1829,11 @@ String OurReader::getFormattedErrorMessages() const {
   return formattedMessage;
 }
 
-std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const {
-  std::vector<OurReader::StructuredError> allErrors;
+std::vector<CharReader::StructuredError>
+OurReader::getStructuredErrors() const {
+  std::vector<CharReader::StructuredError> allErrors;
   for (const auto& error : errors_) {
-    OurReader::StructuredError structured;
+    CharReader::StructuredError structured;
     structured.offset_start = error.token_.start_ - begin_;
     structured.offset_limit = error.token_.end_ - begin_;
     structured.message = error.message_;
@@ -1873,12 +1843,18 @@ std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const {
 }
 
 class OurCharReader : public CharReader {
-  bool const collectComments_;
-  OurReader reader_;
-
 public:
   OurCharReader(bool collectComments, OurFeatures const& features)
-      : collectComments_(collectComments), reader_(features) {}
+      : CharReader(
+            std::unique_ptr<OurImpl>(new OurImpl(collectComments, features))) {}
+
+protected:
+  class OurImpl : public Impl {
+  public:
+    OurImpl(bool collectComments, OurFeatures const& features)
+        : collectComments_(collectComments), reader_(features) {}
 
     bool parse(char const* beginDoc, char const* endDoc, Value* root,
                String* errs) override {
       bool ok = reader_.parse(beginDoc, endDoc, *root, collectComments_);
@@ -1887,6 +1863,16 @@ public:
       }
       return ok;
     }
+
+    std::vector<CharReader::StructuredError>
+    getStructuredErrors() const override {
+      return reader_.getStructuredErrors();
+    }
+
+  private:
+    bool const collectComments_;
+    OurReader reader_;
+  };
 };
 
 CharReaderBuilder::CharReaderBuilder() { setDefaults(&settings_); }
@@ -1976,6 +1962,16 @@ void CharReaderBuilder::setDefaults(Json::Value* settings) {
   //! [CharReaderBuilderDefaults]
 }
 
+std::vector<CharReader::StructuredError>
+CharReader::getStructuredErrors() const {
+  return _impl->getStructuredErrors();
+}
+
+bool CharReader::parse(char const* beginDoc, char const* endDoc, Value* root,
+                       String* errs) {
+  return _impl->parse(beginDoc, endDoc, root, errs);
+}
+
 //////////////////////////////////
 // global functions
 
@@ -1983,7 +1979,7 @@ bool parseFromStream(CharReader::Factory const& fact, IStream& sin, Value* root,
                      String* errs) {
   OStringStream ssin;
   ssin << sin.rdbuf();
-  String doc = ssin.str();
+  String doc = std::move(ssin).str();
   char const* begin = doc.data();
   char const* end = begin + doc.size();
   // Note that we do not actually need a null-terminator.
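For context, parseFromStream() above is the stream-based entry point: it buffers the whole stream into doc and then hands the character range to a CharReader built by the factory. A small usage sketch (not part of the diff; the file name is hypothetical):

    #include <json/json.h>
    #include <fstream>
    #include <iostream>

    int main() {
      std::ifstream in("config.json"); // hypothetical input file
      Json::CharReaderBuilder builder;
      Json::Value root;
      Json::String errs;
      if (!Json::parseFromStream(builder, in, &root, &errs)) {
        std::cerr << errs << '\n';
        return 1;
      }
      std::cout << root.toStyledString();
      return 0;
    }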
@@ -1205,7 +1205,7 @@ bool Value::removeIndex(ArrayIndex index, Value* removed) {
     return false;
   }
   if (removed)
-    *removed = it->second;
+    *removed = std::move(it->second);
   ArrayIndex oldSize = size();
   // shift left all items left, into the place of the "removed"
   for (ArrayIndex i = index; i < (oldSize - 1); ++i) {
@@ -1410,9 +1410,8 @@ void Value::setComment(String comment, CommentPlacement placement) {
     // Always discard trailing newline, to aid indentation.
     comment.pop_back();
   }
-  JSON_ASSERT(!comment.empty());
   JSON_ASSERT_MESSAGE(
-      comment[0] == '\0' || comment[0] == '/',
+      comment.empty() || comment[0] == '/',
       "in Json::Value::setComment(): Comments must start with /");
   comments_.set(placement, std::move(comment));
 }
@@ -354,6 +354,10 @@ String valueToQuotedString(const char* value) {
   return valueToQuotedStringN(value, strlen(value));
 }
 
+String valueToQuotedString(const char* value, size_t length) {
+  return valueToQuotedStringN(value, length);
+}
+
 // Class Writer
 // //////////////////////////////////////////////////////////////////
 Writer::~Writer() = default;
@@ -491,7 +495,7 @@ void StyledWriter::writeValue(const Value& value) {
       const String& name = *it;
      const Value& childValue = value[name];
      writeCommentBeforeValue(childValue);
-      writeWithIndent(valueToQuotedString(name.c_str()));
+      writeWithIndent(valueToQuotedString(name.c_str(), name.size()));
      document_ += " : ";
      writeValue(childValue);
      if (++it == members.end()) {
@@ -709,7 +713,7 @@ void StyledStreamWriter::writeValue(const Value& value) {
      const String& name = *it;
      const Value& childValue = value[name];
      writeCommentBeforeValue(childValue);
-      writeWithIndent(valueToQuotedString(name.c_str()));
+      writeWithIndent(valueToQuotedString(name.c_str(), name.size()));
      *document_ << " : ";
      writeValue(childValue);
      if (++it == members.end()) {
@@ -1247,7 +1251,7 @@ String writeString(StreamWriter::Factory const& factory, Value const& root) {
   OStringStream sout;
   StreamWriterPtr const writer(factory.newStreamWriter());
   writer->write(root, &sout);
-  return sout.str();
+  return std::move(sout).str();
 }
 
 OStream& operator<<(OStream& sout, Value const& root) {
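The new valueToQuotedString(const char*, size_t) overload used above lets the styled writers pass name.size() instead of relying on strlen(), which presumably keeps object keys with embedded null bytes from being truncated. A sketch of calling the public overload directly (not part of the commit):

    #include <json/json.h>
    #include <iostream>
    #include <string>

    int main() {
      const std::string key("ab\0cd", 5); // 5 bytes, one embedded NUL
      // The single-argument overload would stop at the NUL (strlen == 2);
      // the explicit-length overload quotes and escapes all 5 bytes.
      const Json::String quoted = Json::valueToQuotedString(key.c_str(), key.size());
      std::cout << quoted << "\n";
      return 0;
    }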
@@ -3917,6 +3917,36 @@ JSONTEST_FIXTURE_LOCAL(FuzzTest, fuzzDoesntCrash) {
                             example.size()));
 }
 
+struct ParseWithStructuredErrorsTest : JsonTest::TestCase {
+  void testErrors(
+      const std::string& doc, bool success,
+      const std::vector<Json::CharReader::StructuredError>& expectedErrors) {
+    Json::CharReaderBuilder b;
+    CharReaderPtr reader(b.newCharReader());
+    Json::Value root;
+    JSONTEST_ASSERT_EQUAL(
+        reader->parse(doc.data(), doc.data() + doc.length(), &root, nullptr),
+        success);
+    auto actualErrors = reader->getStructuredErrors();
+    JSONTEST_ASSERT_EQUAL(expectedErrors.size(), actualErrors.size());
+    for (std::size_t i = 0; i < actualErrors.size(); i++) {
+      const auto& a = actualErrors[i];
+      const auto& e = expectedErrors[i];
+      JSONTEST_ASSERT_EQUAL(a.offset_start, e.offset_start);
+      JSONTEST_ASSERT_EQUAL(a.offset_limit, e.offset_limit);
+      JSONTEST_ASSERT_STRING_EQUAL(a.message, e.message);
+    }
+  }
+};
+
+JSONTEST_FIXTURE_LOCAL(ParseWithStructuredErrorsTest, success) {
+  testErrors("{}", true, {});
+}
+
+JSONTEST_FIXTURE_LOCAL(ParseWithStructuredErrorsTest, singleError) {
+  testErrors("{ 1 : 2 }", false, {{2, 3, "Missing '}' or object member name"}});
+}
+
 int main(int argc, const char* argv[]) {
   JsonTest::Runner runner;
 
test/data/fail_strict_comment_01.json | 4 (new file)
@@ -0,0 +1,4 @@
+{
+  "a": "aaa",
+  "b": "bbb" // comments not allowed in strict mode
+}
test/data/fail_strict_comment_02.json | 4 (new file)
@@ -0,0 +1,4 @@
+{
+  "a": "aaa", // comments not allowed in strict mode
+  "b": "bbb"
+}
test/data/fail_strict_comment_03.json | 3 (new file)
@@ -0,0 +1,3 @@
+{
+  "array" : [1, 2, 3 /* comments not allowed in strict mode */]
+}
test/data/fail_test_object_02.json | 1 (new file)
@@ -0,0 +1 @@
+{"one": 1 /* } */ { "two" : 2 }
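The fail_strict_comment_* fixtures above are driven through the test runner's new --strict flag; the equivalent check through the public CharReaderBuilder API would look roughly like this (a sketch, not part of the commit):

    #include <json/json.h>
    #include <iostream>
    #include <memory>
    #include <string>

    int main() {
      const std::string doc = "{ \"a\": \"aaa\" // comments not allowed in strict mode\n}";
      Json::CharReaderBuilder builder;
      Json::CharReaderBuilder::strictMode(&builder.settings_); // disables allowComments, among other settings
      const std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
      Json::Value root;
      Json::String errs;
      if (!reader->parse(doc.data(), doc.data() + doc.size(), &root, &errs)) {
        std::cout << "rejected: " << errs;
      }
      return 0;
    }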
@@ -97,14 +97,17 @@ def runAllTests(jsontest_executable_path, input_dir = None,
     valgrind_path = use_valgrind and VALGRIND_CMD or ''
     for input_path in tests + test_jsonchecker:
         expect_failure = os.path.basename(input_path).startswith('fail')
-        is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
+        is_json_checker_test = input_path in test_jsonchecker
+        is_parse_only = is_json_checker_test or expect_failure
+        is_strict_test = ('_strict_' in os.path.basename(input_path)) or is_json_checker_test
         print('TESTING:', input_path, end=' ')
-        options = is_json_checker_test and '--json-checker' or ''
+        options = is_parse_only and '--parse-only' or ''
+        options += is_strict_test and ' --strict' or ''
         options += ' --json-writer %s'%writerClass
         cmd = '%s%s %s "%s"' % ( valgrind_path, jsontest_executable_path, options,
                                  input_path)
         status, process_output = getStatusOutput(cmd)
-        if is_json_checker_test:
+        if is_parse_only:
             if expect_failure:
                 if not status:
                     print('FAILED')