From 0462c68b6bb2e202c230b2a250a643be0676c912 Mon Sep 17 00:00:00 2001
From: DingWenFang <13693510034@139.com>
Date: Sat, 27 Jun 2020 22:00:41 +0800
Subject: [PATCH 1/8] mongodb as policy store

---
 Gopkg.lock                                   | 135 +-
 Gopkg.toml                                   |   4 +
 Makefile                                     |   8 +
 api/pms/types.go                             |  58 +-
 cmd/spctl/command/run_mongodb_test.sh        |  13 +
 cmd/speedle-ads/stores.go                    |   1 +
 cmd/speedle-pms/stores.go                    |   1 +
 pkg/store/mongodb/mongoStoreConfig.json      |   7 +
 pkg/store/mongodb/mongoStoreConfigCloud.json |   7 +
 pkg/store/mongodb/mongodbStore.go            | 852 +++++
 pkg/store/mongodb/mongodbStore_test.go       | 692 ++++
 pkg/store/mongodb/storeBuiler.go             |  81 +
 pkg/svcs/adsgrpc/run_mongodb_test.sh         |  18 +
 pkg/svcs/adsrest/run_mongodb_test.sh         |  21 +
 pkg/svcs/pmsgrpc/run_mongodb_test.sh         |  20 +
 pkg/svcs/pmsrest/config_mongdb_cloud.json    |  10 +
 pkg/svcs/pmsrest/config_mongodb.json         |  10 +
 pkg/svcs/pmsrest/run_mongodb_test.sh         |  19 +
 setTestEnv.sh                                |   2 +-
 404 files changed, 86606 insertions(+), 33 deletions(-)
 create mode 100755 cmd/spctl/command/run_mongodb_test.sh
 create mode 100644 pkg/store/mongodb/mongoStoreConfig.json
 create mode 100644 pkg/store/mongodb/mongoStoreConfigCloud.json
 create mode 100644 pkg/store/mongodb/mongodbStore.go
 create mode 100644 pkg/store/mongodb/mongodbStore_test.go
 create mode 100644 pkg/store/mongodb/storeBuiler.go
 create mode 100755 pkg/svcs/adsgrpc/run_mongodb_test.sh
 create mode 100755 pkg/svcs/adsrest/run_mongodb_test.sh
 create mode 100755 pkg/svcs/pmsgrpc/run_mongodb_test.sh
 create mode 100644 pkg/svcs/pmsrest/config_mongdb_cloud.json
 create mode 100644 pkg/svcs/pmsrest/config_mongodb.json
 create mode 100755 pkg/svcs/pmsrest/run_mongodb_test.sh
diff --git a/Gopkg.lock b/Gopkg.lock
index 870e272..8ac81eb 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1,6 +1,25 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
 
+[[projects]]
+  branch = "master"
+  digest = "1:f3793f8a708522400cef1dba23385e901aede5519f68971fd69938ef330b07a1"
+  name = "github.com/alecthomas/template"
+  packages = [
+    ".",
+    "parse",
+  ]
+  pruneopts = "NUT"
+  revision = "fb15b899a75114aa79cc930e33c46b577cc664b1"
+
+[[projects]]
+  branch = "master"
+  digest = "1:278f733beeec2d654fc7412dbfa2dff194245e13c720bef8d4e284e0e25708f5"
+  name = "github.com/alecthomas/units"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "f65c72e2690dc4b403c8bd637baf4611cd4c069b"
+
 [[projects]]
   digest = "1:e5ca3dcabf1452b51be600af6e2ce0a93a94978ae231af802cf9736bdbd835cb"
   name = "github.com/armon/go-radix"
@@ -148,6 +167,14 @@
   revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
   version = "v1.0.0"
 
+[[projects]]
+  digest = "1:91358b3a314c1ddfd4d6445ca9c1fb846842c31c153e626730594b3c95f73f4a"
+  name = "github.com/go-stack/stack"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
+  version = "v1.8.0"
+
 [[projects]]
   digest = "1:700c0988d24fac04439828af2aab8e7d1638cea3bc91ff20d2a2befc970ff165"
   name = "github.com/gogo/protobuf"
@@ -177,6 +204,14 @@
   revision = "925541529c1fa6821df4e44ce2723319eb2be768"
   version = "v1.0.0"
 
+[[projects]]
+  branch = "master"
+  digest = "1:8cf1ded24c311b8675cd5c9a15bde6bff7a8132b8979dd881d8e58c7c2423a7a"
+  name = "github.com/golang/snappy"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "ff6b7dc882cf4cfba7ee0b9f7dcc1ac096c554aa"
+
 [[projects]]
   branch = "master"
   digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
@@ -252,6 +287,20 @@
   revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
   version = "v0.1.0"
 
+[[projects]]
+  digest = "1:f4246fd2be431716ce5374ce081c164d4e918e854553319e81a0f50776be0389"
+  name = "github.com/klauspost/compress"
+  packages = [
+    "fse",
+    "huff0",
+    "snappy",
+    "zstd",
+    "zstd/internal/xxhash",
+  ]
+  pruneopts = "NUT"
+  revision = "418e7472c4dd2c344aa04672c5b2a5479f87e918"
+  version = "v1.10.8"
+
 [[projects]]
   digest = "1:4059c14e87a2de3a434430340521b5feece186c1469eff0834c29a63870de3ed"
   name = "github.com/konsorten/go-windows-terminal-sequences"
@@ -313,11 +362,12 @@
 [[projects]]
   branch = "master"
-  digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea"
+  digest = "1:d26cc642d82ce3ee1067c3fc60668cf79032759351bd42f0d8d2f538dabce2b3"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
     "internal/bitbucket.org/ww/goautoneg",
+    "log",
     "model",
   ]
   pruneopts = "NUT"
@@ -384,6 +434,22 @@
   revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
   version = "v1.1.1"
 
+[[projects]]
+  branch = "master"
+  digest = "1:b48c1dda664c0cab2a8edf5f58d13fd7657e4edee830f8b36626ff5c26c19f80"
+  name = "github.com/xdg/scram"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "7eeb5667e42c09cb51bf7b7c28aea8c56767da90"
+
+[[projects]]
+  branch = "master"
+  digest = "1:f251ba151fadbef161e88848eb8a249752a879e046c2d308d81deaa2086d5053"
+  name = "github.com/xdg/stringprep"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "73f8eece6fdcd902c185bf651de50f3828bed5ed"
+
 [[projects]]
   digest = "1:e4ffb3b860a4c71f7805415b4ba1e9a27e04eb9c4c03ddd943e9ff1790eda506"
   name = "github.com/xiang90/probing"
@@ -392,6 +458,46 @@
   revision = "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2"
   version = "0.0.1"
 
+[[projects]]
+  digest = "1:5efcc11eb3c43fe7b9aee947479c7268e1cb30ec2059917f4121d6fb71977103"
+  name = "go.mongodb.org/mongo-driver"
+  packages = [
+    "bson",
+    "bson/bsoncodec",
+    "bson/bsonoptions",
+    "bson/bsonrw",
+    "bson/bsontype",
+    "bson/primitive",
+    "event",
+    "internal",
+    "mongo",
+    "mongo/options",
+    "mongo/readconcern",
+    "mongo/readpref",
+    "mongo/writeconcern",
+    "tag",
+    "version",
+    "x/bsonx",
+    "x/bsonx/bsoncore",
+    "x/mongo/driver",
+    "x/mongo/driver/address",
+    "x/mongo/driver/auth",
+    "x/mongo/driver/auth/internal/gssapi",
+    "x/mongo/driver/connstring",
+    "x/mongo/driver/description",
+    "x/mongo/driver/dns",
+    "x/mongo/driver/mongocrypt",
+    "x/mongo/driver/mongocrypt/options",
+    "x/mongo/driver/operation",
+    "x/mongo/driver/session",
+    "x/mongo/driver/topology",
+    "x/mongo/driver/uuid",
+    "x/mongo/driver/wiremessage",
+  ]
+  pruneopts = "NUT"
+  revision = "4ce2db174a8ec022f504b9bc0e768e284e44708f"
+  version = "v1.3.4"
+
 [[projects]]
   digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
   name = "go.uber.org/atomic"
@@ -425,11 +531,12 @@
 [[projects]]
   branch = "master"
-  digest = "1:001a4e7a40e50ff2ef32e2556bca50c4f77daa457db3ac6afc8bea9bb2122cfb"
+  digest = "1:7310f5459b88177d24e26c5ec7deb100b78ec03c0437d7a7aaec3c7eac333a55"
   name = "golang.org/x/crypto"
   packages = [
     "bcrypt",
     "blowfish",
+    "pbkdf2",
     "ssh/terminal",
   ]
   pruneopts = "NUT"
@@ -453,11 +560,21 @@
 [[projects]]
   branch = "master"
-  digest = "1:0ceea664a3d703218d433440b91555c0e918681e50a4a3d10c3f6166a1373a6d"
+  digest = "1:4692f916cb72b2c295f04841036d85a3f13e96d1cc9e8e4c2c30edebac518053"
+  name = "golang.org/x/sync"
+  packages = ["semaphore"]
+  pruneopts = "NUT"
+  revision = "43a5402ce75a95522677f77c619865d66b8c57ab"
+
+[[projects]]
+  branch = "master"
+  digest = "1:2ff0c7befb2422e342e97c8086c1d86bbe47708a70aa51981c47bf61fff75f96"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
+    "windows/registry",
+    "windows/svc/eventlog",
   ]
   pruneopts = "NUT"
   revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
@@ -537,6 +654,14 @@
   revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
   version = "v1.10.0"
 
+[[projects]]
+  digest = "1:22b2dee6f30bc8601f087449a2a819df8388e54e9547349c658f14d8f8c590f2"
+  name = "gopkg.in/alecthomas/kingpin.v2"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
+  version = "v2.2.6"
+
 [[projects]]
   digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
   name = "gopkg.in/yaml.v2"
@@ -559,12 +684,16 @@
     "github.com/gorilla/mux",
     "github.com/natefinch/lumberjack",
     "github.com/pkg/errors",
+    "github.com/prometheus/common/log",
     "github.com/sirupsen/logrus",
     "github.com/spf13/cobra",
     "github.com/spf13/pflag",
+    "go.mongodb.org/mongo-driver/mongo",
     "golang.org/x/net/context",
     "google.golang.org/grpc",
+    "google.golang.org/grpc/codes",
     "google.golang.org/grpc/reflection",
+    "google.golang.org/grpc/status",
   ]
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index 0d28788..05ccea8 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -90,3 +90,7 @@ ignored = ["github.com/teramoby/speedle-plus/samples*"]
 [[override]]
   name = "github.com/coreos/bbolt"
   revision = "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d"
+
+[[constraint]]
+  name = "go.mongodb.org/mongo-driver"
+  version = "1.3.4"
${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/suid go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/assertion go clean -testcache STORE_TYPE=etcd go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/eval + go clean -testcache + STORE_TYPE=mongodb go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/eval testSpeedleRest: pkg/svcs/pmsrest/run_file_test.sh pkg/svcs/pmsrest/run_etcd_test.sh + pkg/svcs/pmsrest/run_mongodb_test.sh testSpeedleGRpc: pkg/svcs/pmsgrpc/run_file_test.sh pkg/svcs/pmsgrpc/run_etcd_test.sh + pkg/svcs/pmsgrpc/run_mongodb_test.sh testSpeedleRestADSCheck: pkg/svcs/adsrest/run_file_test.sh pkg/svcs/adsrest/run_etcd_test.sh + pkg/svcs/adsrest/run_mongodb_test.sh testSpeedleGRpcADSCheck: pkg/svcs/adsgrpc/run_file_test.sh pkg/svcs/adsgrpc/run_etcd_test.sh + pkg/svcs/adsgrpc/run_mongodb_test.sh testSpctl: cmd/spctl/command/run_file_test.sh cmd/spctl/command/run_etcd_test.sh + cmd/spctl/command/run_mongodb_test.sh testSpeedleTls: pkg/svcs/pmsrest/tls_test.sh diff --git a/api/pms/types.go b/api/pms/types.go index 26a8ae3..c8e7438 100644 --- a/api/pms/types.go +++ b/api/pms/types.go @@ -10,24 +10,24 @@ type Permission struct { } type Function struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - FuncURL string `json:"funcURL"` //used by speedle/sphinx ADS - LocalFuncURL string `json:"localFuncURL,omitempty"` //used by sphinx runtime proxy to get better performance - CA string `json:"ca,omitempty"` //security related configurations - ResultCachable bool `json:"resultCachable,omitempty"` //false by default - ResultTTL int64 `json:"resultTTL,omitempty"` // TTL of function result in second - Metadata map[string]string `json:"metadata,omitempty"` + Name string `json:"name" bson:"_id"` + Description string `json:"description,omitempty" bson:"description,omitempty"` + FuncURL string `json:"funcURL" bson:"funcurl"` //used by speedle/sphinx ADS + LocalFuncURL string `json:"localFuncURL,omitempty" bson:"localfuncurl"` //used by sphinx runtime proxy to get better performance + CA string `json:"ca,omitempty" bson:"ca,omitempty"` //security related configurations + ResultCachable bool `json:"resultCachable,omitempty" bson:"resultcachable,omitempty"` //false by default + ResultTTL int64 `json:"resultTTL,omitempty" bson:"resultttl,omitempty"` // TTL of function result in second + Metadata map[string]string `json:"metadata,omitempty" bson:"metadata,omitempty"` } type Policy struct { - ID string `json:"id"` - Name string `json:"name"` - Effect string `json:"effect,omitempty"` - Permissions []*Permission `json:"permissions,omitempty"` - Principals [][]string `json:"principals,omitempty"` - Condition string `json:"condition,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` + ID string `json:"id" bson:"_id"` + Name string `json:"name" bson:"name,omitempty"` + Effect string `json:"effect,omitempty" bson:"effect,omitempty"` + Permissions []*Permission `json:"permissions,omitempty" bson:"permissions,omitempty"` + Principals [][]string `json:"principals,omitempty" bson:"principals,omitempty"` + Condition string `json:"condition,omitempty" bson:"condition,omitempty"` + Metadata map[string]string `json:"metadata,omitempty" bson:"metadata,omitempty"` } const ( @@ -41,23 +41,23 @@ const ( ) type RolePolicy struct { - ID string `json:"id"` - Name string `json:"name"` - Effect string `json:"effect,omitempty"` - Roles []string `json:"roles,omitempty"` - Principals []string `json:"principals,omitempty"` - Resources []string 
`json:"resources,omitempty"` - ResourceExpressions []string `json:"resourceExpressions,omitempty"` - Condition string `json:"condition,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` + ID string `json:"id" bson:"_id"` + Name string `json:"name" bson:"name,omitempty"` + Effect string `json:"effect,omitempty" bson:"effect,omitempty"` + Roles []string `json:"roles,omitempty" bson:"roles,omitempty"` + Principals []string `json:"principals,omitempty" bson:"principals,omitempty"` + Resources []string `json:"resources,omitempty" bson:"resources,omitempty"` + ResourceExpressions []string `json:"resourceExpressions,omitempty" bson:"resourceexpressions,omitempty"` + Condition string `json:"condition,omitempty" bson:"condition,omitempty"` + Metadata map[string]string `json:"metadata,omitempty" bson:"metadata,omitempty"` } type Service struct { - Name string `json:"name" binding:"required"` - Type string `json:"type,omitempty"` - Policies []*Policy `json:"policies,omitempty"` - RolePolicies []*RolePolicy `json:"rolePolicies,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` + Name string `json:"name" binding:"required" bson:"_id"` + Type string `json:"type,omitempty" bson:"type,omitempty"` + Policies []*Policy `json:"policies,omitempty" bson:"policies,omitempty"` + RolePolicies []*RolePolicy `json:"rolePolicies,omitempty" bson:"rolepolicies,omitempty"` + Metadata map[string]string `json:"metadata,omitempty" bson:"metadata,omitempty"` } const GlobalService = "global" diff --git a/cmd/spctl/command/run_mongodb_test.sh b/cmd/spctl/command/run_mongodb_test.sh new file mode 100755 index 0000000..96885c5 --- /dev/null +++ b/cmd/spctl/command/run_mongodb_test.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -ex +source ${GOPATH}/src/github.com/teramoby/speedle-plus/setTestEnv.sh +go clean -testcache + +#Reconfig spctl +${GOPATH}/bin/spctl config pms-endpoint http://localhost:6733/policy-mgmt/v1/ + +startPMS mongodb --config-file ${GOPATH}/src/github.com/teramoby/speedle-plus/pkg/svcs/pmsrest/config_mongodb.json +sleep 5 +${GOPATH}/bin/spctl delete service --all +go test ${TEST_OPTS} github.com/teramoby/speedle-plus/cmd/spctl/command -run=TestMats + diff --git a/cmd/speedle-ads/stores.go b/cmd/speedle-ads/stores.go index 11a5077..7f3dfb1 100644 --- a/cmd/speedle-ads/stores.go +++ b/cmd/speedle-ads/stores.go @@ -9,4 +9,5 @@ package main import ( _ "github.com/teramoby/speedle-plus/pkg/store/etcd" _ "github.com/teramoby/speedle-plus/pkg/store/file" + _ "github.com/teramoby/speedle-plus/pkg/store/mongodb" ) diff --git a/cmd/speedle-pms/stores.go b/cmd/speedle-pms/stores.go index 11a5077..7f3dfb1 100644 --- a/cmd/speedle-pms/stores.go +++ b/cmd/speedle-pms/stores.go @@ -9,4 +9,5 @@ package main import ( _ "github.com/teramoby/speedle-plus/pkg/store/etcd" _ "github.com/teramoby/speedle-plus/pkg/store/file" + _ "github.com/teramoby/speedle-plus/pkg/store/mongodb" ) diff --git a/pkg/store/mongodb/mongoStoreConfig.json b/pkg/store/mongodb/mongoStoreConfig.json new file mode 100644 index 0000000..8beac85 --- /dev/null +++ b/pkg/store/mongodb/mongoStoreConfig.json @@ -0,0 +1,7 @@ +{ + "storeType": "mongodb", + "storeProps": { + "MongoURI": "mongodb://localhost:27017", + "MongoDatabase": "speedletest" + } +} \ No newline at end of file diff --git a/pkg/store/mongodb/mongoStoreConfigCloud.json b/pkg/store/mongodb/mongoStoreConfigCloud.json new file mode 100644 index 0000000..16bd627 --- /dev/null +++ b/pkg/store/mongodb/mongoStoreConfigCloud.json @@ -0,0 +1,7 @@ +{ + "storeType": "mongodb", + 
"storeProps": { + "MongoURI": "mongodb+srv://speedleplus:Passw0rd@cluster0-wfhda.mongodb.net/speedletest?retryWrites=true&w=majority", + "MongoDatabase": "speedletest" + } +} \ No newline at end of file diff --git a/pkg/store/mongodb/mongodbStore.go b/pkg/store/mongodb/mongodbStore.go new file mode 100644 index 0000000..215c319 --- /dev/null +++ b/pkg/store/mongodb/mongodbStore.go @@ -0,0 +1,852 @@ +package mongodb + +import ( + "context" + "strings" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + log "github.com/sirupsen/logrus" + "github.com/teramoby/speedle-plus/api/pms" + "github.com/teramoby/speedle-plus/pkg/errors" + "github.com/teramoby/speedle-plus/pkg/suid" +) + +type Store struct { + client *mongo.Client + Database string +} + +// ReadPolicyStore reads policy store from a file +func (s *Store) ReadPolicyStore() (*pms.PolicyStore, error) { + + var ps pms.PolicyStore + services, err := s.ListAllServices() + if err != nil { + return nil, err + } + ps.Services = services + functions, err := s.ListAllFunctions("") + if err != nil { + return nil, err + } + ps.Functions = functions + return &ps, nil + +} + +// WritePolicyStore writes policies to a file +func (s *Store) WritePolicyStore(ps *pms.PolicyStore) error { + err := s.DeleteServices() + if err != nil { + return err + } + err = s.DeleteFunctions() + if err != nil { + return err + } + for _, service := range ps.Services { + err = s.CreateService(service) + if err != nil { + return err + } + } + for _, f := range ps.Functions { + _, err = s.CreateFunction(f) + if err != nil { + return err + } + } + return nil + +} + +// ListAllServices lists all the services +func (s *Store) ListAllServices() ([]*pms.Service, error) { + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + cur, err := serviceCollection.Find(ctx, bson.M{}) + if err != nil { + return nil, err + } + defer cur.Close(ctx) + services := []*pms.Service{} + for cur.Next(ctx) { + var service pms.Service + err := cur.Decode(&service) + if err != nil { + return nil, err + } + services = append(services, &service) + } + + return services, err + +} + +// GetServiceNames reads all the service names +func (s *Store) GetServiceNames() ([]string, error) { + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + matchstag := bson.D{{"$match", bson.D{{"$exists", true}}}} + projectstag := bson.D{{"$project", bson.D{{"_id", 1}}}} + opts := options.Aggregate().SetMaxTime(2 * time.Second) + cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts) + if err != nil { + return nil, err + } + var services []pms.Service + if err = cur.All(ctx, &services); err != nil { + panic(err) + } + names := []string{} + if services == nil || len(services) == 0 { + return names, nil + } + for _, service := range services { + names = append(names, service.Name) + } + + return names, nil + +} + +// GetPolicyAndRolePolicyCounts returns a map, in which the key is the service name, and the value is the count of both policies and role policies in the service. 
+func (s *Store) GetPolicyAndRolePolicyCounts() (map[string]*pms.PolicyAndRolePolicyCount, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	matchstag := bson.D{{"$match", bson.D{}}}
+	projectstag := bson.D{{"$project",
+		bson.D{{"policyCount", bson.D{{"$size", bson.D{{"$ifNull", bson.A{"$policies", bson.A{}}}}}}},
+			{"rolepolicyCount", bson.D{{"$size", bson.D{{"$ifNull", bson.A{"$rolepolicies", bson.A{}}}}}}},
+			{"count", bson.D{{"$sum",
+				bson.A{
+					bson.D{{"$size", bson.D{{"$ifNull", bson.A{"$policies", bson.A{}}}}}},
+					bson.D{{"$size", bson.D{{"$ifNull", bson.A{"$rolepolicies", bson.A{}}}}}},
+				}}}},
+		}}}
+
+	opts := options.Aggregate().SetMaxTime(5 * time.Second)
+	cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts)
+	if err != nil {
+		return nil, err
+	}
+	var results []bson.M
+	if err = cur.All(ctx, &results); err != nil {
+		return nil, err
+	}
+	countMap := make(map[string]*pms.PolicyAndRolePolicyCount)
+	if len(results) == 0 {
+		return countMap, nil
+	}
+	for _, res := range results {
+		var counts pms.PolicyAndRolePolicyCount
+		counts.PolicyCount = int64(res["policyCount"].(int32))
+		counts.RolePolicyCount = int64(res["rolepolicyCount"].(int32))
+		countMap[res["_id"].(string)] = &counts
+	}
+
+	return countMap, nil
+
+}
+
+// GetServiceCount gets the service count
+func (s *Store) GetServiceCount() (int64, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	num, err := serviceCollection.CountDocuments(ctx, bson.D{})
+	if err != nil {
+		return -1, err
+	}
+
+	return num, nil
+}
+
+// GetService gets the detailed info of a service
+func (s *Store) GetService(serviceName string) (*pms.Service, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	singleResult := serviceCollection.FindOne(ctx, bson.M{"_id": serviceName})
+	if singleResult.Err() != nil {
+		if singleResult.Err() == mongo.ErrNoDocuments {
+			return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+		} else {
+			return nil, singleResult.Err()
+		}
+
+	}
+	var service *pms.Service
+	err := singleResult.Decode(&service)
+
+	return service, err
+
+}
+
+func generateID(service *pms.Service) (*pms.Service, error) {
+	var result pms.Service
+	result = *service
+	if result.Policies == nil {
+		result.Policies = []*pms.Policy{}
+	}
+	if result.RolePolicies == nil {
+		result.RolePolicies = []*pms.RolePolicy{}
+	}
+	for _, policy := range result.Policies {
+		policy.ID = suid.New().String()
+	}
+	for _, rolePolicy := range result.RolePolicies {
+		rolePolicy.ID = suid.New().String()
+	}
+	return &result, nil
+}
+
+// CreateService creates a new service
+func (s *Store) CreateService(service *pms.Service) error {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	serviceWithID, _ := generateID(service)
+	insertResult, err := serviceCollection.InsertOne(ctx, serviceWithID)
+	if err != nil {
+		return err
+	}
+	log.Info(insertResult.InsertedID)
+	return nil
+}
+
+// DeleteService deletes the service named ${serviceName} from MongoDB
+func (s *Store) DeleteService(serviceName string) error {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	deleteResult, err := serviceCollection.DeleteOne(ctx, bson.M{"_id": serviceName})
+	if err != nil {
+		return err
+	}
+	if deleteResult.DeletedCount == 0 {
+		return errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	}
+	return nil
+}
+
+// DeleteServices deletes all services from MongoDB by dropping the services collection
+func (s *Store) DeleteServices() error {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	err := serviceCollection.Drop(ctx)
+	return err
+}
+
+func (s *Store) Watch() (pms.StorageChangeChannel, error) {
+	log.Info("Enter Watch...")
+	streamOptions := options.ChangeStream().SetFullDocument(options.UpdateLookup)
+	changeStream, err := s.client.Database(s.Database).Watch(context.TODO(), mongo.Pipeline{}, streamOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	var storeChangeChan pms.StorageChangeChannel
+	storeChangeChan = make(chan pms.StoreChangeEvent)
+
+	go func() {
+		defer func() {
+			changeStream.Close(context.TODO())
+			close(storeChangeChan)
+		}()
+
+		for changeStream.Next(context.TODO()) {
+			// A new event variable should be declared for each event.
+			var event bson.M
+			if err := changeStream.Decode(&event); err != nil {
+				log.Error(err)
+				continue
+			}
+			log.Info("-----watched event:", event)
+			log.Info("---------- fulldocument:", event["fullDocument"])
+			var ns bson.M
+			ns = event["ns"].(bson.M)
+
+			//ns.coll =="services"
+			if ns["coll"] == "services" {
+				//operationType == update
+				if event["operationType"] == "update" {
+					log.Info("===update service")
+					id := time.Now().Unix()
+					var service pms.Service
+					docb, err := bson.Marshal(event["fullDocument"])
+					if err != nil {
+						log.Error(err)
+						continue
+					}
+					err = bson.Unmarshal(docb, &service)
+					if err != nil {
+						log.Error(err)
+						continue
+					}
+
+					serviceDeleteEvent := pms.StoreChangeEvent{Type: pms.SERVICE_DELETE, ID: id, Content: []string{service.Name}}
+					log.Info("serviceDeleteEvent:", serviceDeleteEvent)
+					storeChangeChan <- serviceDeleteEvent
+					id = time.Now().Unix()
+					serviceAddEvent := pms.StoreChangeEvent{Type: pms.SERVICE_ADD, ID: id, Content: &service}
+					log.Info("serviceAddEvent:", serviceAddEvent)
+					storeChangeChan <- serviceAddEvent
+
+				} else if event["operationType"] == "insert" {
+					log.Info("===insert service")
+					id := time.Now().Unix()
+					var service pms.Service
+					docb, err := bson.Marshal(event["fullDocument"])
+					if err != nil {
+						log.Error(err)
+						continue
+					}
+					err = bson.Unmarshal(docb, &service)
+					if err != nil {
+						log.Error(err)
+						continue
+					}
+					serviceAddEvent := pms.StoreChangeEvent{Type: pms.SERVICE_ADD, ID: id, Content: &service}
+					log.Info("###serviceAddEvent:", serviceAddEvent)
+					storeChangeChan <- serviceAddEvent
+
+				} else if event["operationType"] == "delete" {
+					log.Info("===delete service")
+					id := time.Now().Unix()
+					serviceName := event["documentKey"].(bson.M)["_id"].(string)
+					serviceDeleteEvent := pms.StoreChangeEvent{Type: pms.SERVICE_DELETE, ID: id, Content: []string{serviceName}}
+					log.Info("###serviceDeleteEvent:", serviceDeleteEvent)
+					storeChangeChan <- serviceDeleteEvent
+
+				}
+			} else if ns["coll"] == "functions" {
+
+				if event["operationType"] == "insert" {
+					log.Info("===insert function")
+					id := time.Now().Unix()
+					var f pms.Function
+					docb, err := 
bson.Marshal(event["fullDocument"]) + if err != nil { + log.Error(err) + continue + } + err = bson.Unmarshal(docb, &f) + if err != nil { + log.Error(err) + continue + } + funcAddEvent := pms.StoreChangeEvent{Type: pms.FUNCTION_ADD, ID: id, Content: &f} + log.Info("###funcAddEvent:", funcAddEvent) + storeChangeChan <- funcAddEvent + + } else if event["operationType"] == "delete" { + log.Info("===delete function") + id := time.Now().Unix() + funcName := event["documentKey"].(bson.M)["_id"].(string) + funcDeleteEvent := pms.StoreChangeEvent{Type: pms.FUNCTION_DELETE, ID: id, Content: []string{funcName}} + log.Info("###funcDeleteEvent:", funcDeleteEvent) + storeChangeChan <- funcDeleteEvent + + } + } + + } + log.Info("###exit for loop") + + if err := changeStream.Err(); err != nil { + log.Error(err) + } + + }() + + return storeChangeChan, nil + +} + +func (s *Store) StopWatch() { + +} + +func (s *Store) Type() string { + return StoreType +} + +func parseFilter(filterStr string) (bson.D, error) { + if len(filterStr) == 0 { + return bson.D{{"$eq", bson.A{1, 1}}}, nil + } + values := strings.Split(filterStr, " ") + if len(values) == 2 { + field := values[0] + operator := values[1] + switch operator { + case "pr": + return bson.D{{"$gt", bson.A{"$$p." + field, nil}}}, nil + default: + log.Error("invalid name filter:", filterStr) + return nil, errors.Errorf(errors.InvalidRequest, "invalid filter %q", filterStr) + } + + } else if len(values) == 3 { + field := values[0] + operator := values[1] + target := values[2] + switch operator { + case "eq": + return bson.D{{"$eq", bson.A{"$$p." + field, target}}}, nil + case "co": + return bson.D{{"$gte", bson.A{bson.D{{"$indexOfBytes", bson.A{"$$p." + field, target}}}, 0}}}, nil + case "sw": + return bson.D{{"$eq", bson.A{0, bson.D{{"$indexOfBytes", bson.A{"$$p." + field, target}}}}}}, nil + case "gt": + return bson.D{{"$gt", bson.A{"$$p." + field, target}}}, nil + case "ge": + return bson.D{{"$gte", bson.A{"$$p." + field, target}}}, nil + case "lt": + return bson.D{{"$lt", bson.A{"$$p." + field, target}}}, nil + case "le": + return bson.D{{"$lte", bson.A{"$$p." 
+ field, target}}}, nil
+		default:
+			log.Error("invalid name filter:", filterStr)
+			return nil, errors.Errorf(errors.InvalidRequest, "invalid filter %q", filterStr)
+		}
+
+	} else {
+		log.Error("invalid filter string:", filterStr)
+		return nil, errors.Errorf(errors.InvalidRequest, "invalid filter %q", filterStr)
+	}
+
+}
+
+// For policy manager
+func (s *Store) ListAllPolicies(serviceName string, filter string) ([]*pms.Policy, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	matchstag := bson.D{{"$match", bson.D{{"_id", serviceName}}}}
+	condition, err := parseFilter(filter)
+	if err != nil {
+		return nil, err
+	}
+	projectstag := bson.D{
+		{"$project", bson.D{
+			{"policies", bson.D{
+				{"$filter", bson.D{
+					{"input", "$policies"},
+					{"as", "p"},
+					{"cond", condition}},
+				}},
+			}},
+		}}
+	opts := options.Aggregate().SetMaxTime(2 * time.Second)
+	cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts)
+	if err != nil {
+		return nil, err
+	}
+	var services []pms.Service
+	if err = cur.All(ctx, &services); err != nil {
+		return nil, errors.New(errors.StoreError, err.Error())
+	}
+	if len(services) == 0 {
+		return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	}
+
+	return services[0].Policies, nil
+
+}
+
+func (s *Store) GetPolicyCount(serviceName string) (int64, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	var matchstag bson.D
+	if len(serviceName) != 0 {
+		matchstag = bson.D{{"$match", bson.D{{"_id", serviceName}}}}
+	} else {
+		matchstag = bson.D{{"$match", bson.D{}}}
+	}
+
+	projectstag := bson.D{{"$project", bson.D{{"policycount", bson.D{{"$size", bson.D{{"$ifNull", bson.A{"$policies", bson.A{}}}}}}}}}}
+
+	opts := options.Aggregate().SetMaxTime(5 * time.Second)
+	cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts)
+	if err != nil {
+		return -1, err
+	}
+	var results []bson.M
+	if err = cur.All(ctx, &results); err != nil {
+		return -1, err
+	}
+	if len(results) == 0 {
+		if len(serviceName) != 0 {
+			return -1, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+		} else {
+			return 0, nil
+		}
+	}
+	policyCount := int64(0)
+
+	for _, res := range results {
+		policyCount += int64(res["policycount"].(int32))
+	}
+
+	return policyCount, nil
+
+}
+
+func (s *Store) GetPolicy(serviceName string, id string) (*pms.Policy, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	matchstag := bson.D{{"$match", bson.D{{"_id", serviceName}}}}
+	projectstag := bson.D{
+		{"$project", bson.D{
+			{"policies", bson.D{
+				{"$filter", bson.D{
+					{"input", "$policies"},
+					{"as", "p"},
+					{"cond", bson.D{{"$eq", bson.A{"$$p._id", id}}}}},
+				}},
+			}},
+		}}
+	opts := options.Aggregate().SetMaxTime(5 * time.Second)
+	cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts)
+	if err != nil {
+		return nil, err
+	}
+	var services []pms.Service
+	if err = cur.All(ctx, &services); err != nil {
+		return nil, err
+	}
+	if len(services) == 0 {
+		return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	}
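+	// The service document matched, but the $filter stage produced an empty
+	// array: no policy in this service carries the requested ID.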
+ if services[0].Policies == nil || len(services[0].Policies) == 0 { + return nil, errors.Errorf(errors.EntityNotFound, "policy %q is not found", id) + } + return services[0].Policies[0], nil + +} + +func (s *Store) DeletePolicy(serviceName string, id string) error { + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + filter := bson.D{{"_id", serviceName}} + update := bson.D{{"$pull", bson.D{{"policies", bson.D{{"_id", id}}}}}} + result, err := serviceCollection.UpdateOne(ctx, filter, update) + if err != nil { + return err + } + if result.MatchedCount == 0 { + return errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName) + } + if result.ModifiedCount == 0 { + return errors.Errorf(errors.EntityNotFound, "policy %q is not found", id) + } + return nil + +} + +func (s *Store) DeletePolicies(serviceName string) error { + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + filter := bson.D{{"_id", serviceName}} + update := bson.D{{"$pull", bson.D{{"policies", bson.D{{"$exists", true}}}}}} + result := serviceCollection.FindOneAndUpdate(ctx, filter, update) + if result.Err() == mongo.ErrNoDocuments { + return errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName) + } else { + return result.Err() + } + +} + +func (s *Store) CreatePolicy(serviceName string, policy *pms.Policy) (*pms.Policy, error) { + dupPolicy := *policy + dupPolicy.ID = suid.New().String() + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + filter := bson.D{{"_id", serviceName}} + update := bson.D{{"$push", bson.D{{"policies", dupPolicy}}}} + result := serviceCollection.FindOneAndUpdate(ctx, filter, update) + if result.Err() == nil { + return &dupPolicy, nil + } else if result.Err() == mongo.ErrNoDocuments { + return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName) + } else { + return nil, result.Err() + } + +} + +// For role policy manager +func (s *Store) ListAllRolePolicies(serviceName string, filter string) ([]*pms.RolePolicy, error) { + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + matchstag := bson.D{{"$match", bson.D{{"_id", serviceName}}}} + condition, err := parseFilter(filter) + if err != nil { + return nil, err + } + projectstag := bson.D{ + {"$project", bson.D{ + {"rolepolicies", bson.D{ + {"$filter", bson.D{ + {"input", "$rolepolicies"}, + {"as", "p"}, + {"cond", condition}}, + }}, + }}, + }} + opts := options.Aggregate().SetMaxTime(2 * time.Second) + cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts) + if err != nil { + return nil, err + } + var services []pms.Service + if err = cur.All(ctx, &services); err != nil { + return nil, errors.New(errors.StoreError, err.Error()) + } + if services == nil || len(services) == 0 { + return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName) + } + + return services[0].RolePolicies, nil + +} + +func (s *Store) GetRolePolicyCount(serviceName string) (int64, error) { + serviceCollection := s.client.Database(s.Database).Collection("services") + ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	var matchstag bson.D
+	if len(serviceName) != 0 {
+		matchstag = bson.D{{"$match", bson.D{{"_id", serviceName}}}}
+	} else {
+		matchstag = bson.D{{"$match", bson.D{}}}
+	}
+
+	projectstag := bson.D{{"$project", bson.D{{"rolepolicycount", bson.D{{"$size", bson.D{{"$ifNull", bson.A{"$rolepolicies", bson.A{}}}}}}}}}}
+
+	opts := options.Aggregate().SetMaxTime(5 * time.Second)
+	cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts)
+	if err != nil {
+		return -1, err
+	}
+	var results []bson.M
+	if err = cur.All(ctx, &results); err != nil {
+		return -1, err
+	}
+	if len(results) == 0 {
+		if len(serviceName) != 0 {
+			return -1, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+		} else {
+			return 0, nil
+		}
+	}
+	rolePolicyCount := int64(0)
+
+	for _, res := range results {
+		rolePolicyCount += int64(res["rolepolicycount"].(int32))
+	}
+
+	return rolePolicyCount, nil
+
+}
+
+func (s *Store) GetRolePolicy(serviceName string, id string) (*pms.RolePolicy, error) {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	matchstag := bson.D{{"$match", bson.D{{"_id", serviceName}}}}
+	projectstag := bson.D{
+		{"$project", bson.D{
+			{"rolepolicies", bson.D{
+				{"$filter", bson.D{
+					{"input", "$rolepolicies"},
+					{"as", "p"},
+					{"cond", bson.D{{"$eq", bson.A{"$$p._id", id}}}}},
+				}},
+			}},
+		}}
+	opts := options.Aggregate().SetMaxTime(5 * time.Second)
+	cur, err := serviceCollection.Aggregate(ctx, mongo.Pipeline{matchstag, projectstag}, opts)
+	if err != nil {
+		return nil, err
+	}
+	var services []pms.Service
+	if err = cur.All(ctx, &services); err != nil {
+		return nil, err
+	}
+	if len(services) == 0 {
+		return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	}
+	if services[0].RolePolicies == nil || len(services[0].RolePolicies) == 0 {
+		return nil, errors.Errorf(errors.EntityNotFound, "rolepolicy %q is not found", id)
+	}
+	return services[0].RolePolicies[0], nil
+
+}
+
+func (s *Store) DeleteRolePolicy(serviceName string, id string) error {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	filter := bson.D{{"_id", serviceName}}
+	update := bson.D{{"$pull", bson.D{{"rolepolicies", bson.D{{"_id", id}}}}}}
+	result, err := serviceCollection.UpdateOne(ctx, filter, update)
+	if err != nil {
+		return err
+	}
+	if result.MatchedCount == 0 {
+		return errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	}
+	if result.ModifiedCount == 0 {
+		return errors.Errorf(errors.EntityNotFound, "rolepolicy %q is not found", id)
+	}
+	return nil
+
+}
+
+func (s *Store) DeleteRolePolicies(serviceName string) error {
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	filter := bson.D{{"_id", serviceName}}
+	update := bson.D{{"$pull", bson.D{{"rolepolicies", bson.D{{"$exists", true}}}}}}
+	result := serviceCollection.FindOneAndUpdate(ctx, filter, update)
+	if result.Err() == mongo.ErrNoDocuments {
+		return errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	} else {
+		return result.Err()
+	}
+
+}
+
+func (s *Store) CreateRolePolicy(serviceName string, rolePolicy *pms.RolePolicy) (*pms.RolePolicy, error) {
+	dupPolicy := *rolePolicy
+	dupPolicy.ID = suid.New().String()
+	serviceCollection := s.client.Database(s.Database).Collection("services")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	filter := bson.D{{"_id", serviceName}}
+	update := bson.D{{"$push", bson.D{{"rolepolicies", dupPolicy}}}}
+	result := serviceCollection.FindOneAndUpdate(ctx, filter, update)
+	if result.Err() == nil {
+		return &dupPolicy, nil
+	} else if result.Err() == mongo.ErrNoDocuments {
+		return nil, errors.Errorf(errors.EntityNotFound, "service %q is not found", serviceName)
+	} else {
+		return nil, result.Err()
+	}
+}
+
+func validateFunc(function *pms.Function) error {
+	if function.Name == "" || function.FuncURL == "" {
+		return errors.New(errors.InvalidRequest, "\"name\" and \"funcURL\" in function definition can not be empty")
+	}
+	return nil
+}
+
+func (s *Store) CreateFunction(function *pms.Function) (*pms.Function, error) {
+	if err := validateFunc(function); err != nil {
+		return nil, err
+	}
+	functionCollection := s.client.Database(s.Database).Collection("functions")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	insertResult, err := functionCollection.InsertOne(ctx, function)
+	if err != nil {
+		return nil, err
+	}
+	log.Info(insertResult.InsertedID)
+	return function, nil
+
+}
+
+func (s *Store) DeleteFunction(funcName string) error {
+	functionCollection := s.client.Database(s.Database).Collection("functions")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	deleteResult, err := functionCollection.DeleteOne(ctx, bson.M{"_id": funcName})
+	if err != nil {
+		return err
+	}
+	if deleteResult.DeletedCount == 0 {
+		return errors.Errorf(errors.EntityNotFound, "function %q is not found", funcName)
+	}
+	return nil
+
+}
+
+func (s *Store) DeleteFunctions() error {
+	functionCollection := s.client.Database(s.Database).Collection("functions")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	err := functionCollection.Drop(ctx)
+	return err
+
+}
+
+func (s *Store) GetFunction(funcName string) (*pms.Function, error) {
+	functionCollection := s.client.Database(s.Database).Collection("functions")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	singleResult := functionCollection.FindOne(ctx, bson.M{"_id": funcName})
+	if singleResult.Err() != nil {
+		if singleResult.Err() == mongo.ErrNoDocuments {
+			return nil, errors.Errorf(errors.EntityNotFound, "function %q is not found", funcName)
+		}
+		return nil, singleResult.Err()
+	}
+	var f *pms.Function
+	err := singleResult.Decode(&f)
+
+	return f, err
+
+}
+
+// ListAllFunctions lists all functions; the filter parameter is currently ignored.
+func (s *Store) ListAllFunctions(filter string) ([]*pms.Function, error) {
+	functionCollection := s.client.Database(s.Database).Collection("functions")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	cur, err := functionCollection.Find(ctx, bson.M{})
+	if err != nil {
+		return nil, err
+	}
+	defer cur.Close(ctx)
+	functions := []*pms.Function{}
+	for cur.Next(ctx) {
+		var f pms.Function
+		err := cur.Decode(&f)
+		if err != nil {
+			return nil, err
+		}
+		functions = append(functions, &f)
+	}
+	return functions, nil
+
+}
+
+func (s *Store) GetFunctionCount() (int64, error) {
+	functionCollection := s.client.Database(s.Database).Collection("functions")
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	num, err := functionCollection.CountDocuments(ctx, bson.D{})
+	if err != nil {
+		return -1, err
+	}
+
+	return num, nil
+
+}
diff --git a/pkg/store/mongodb/mongodbStore_test.go b/pkg/store/mongodb/mongodbStore_test.go
new file mode 100644
index 0000000..0f4ee50
--- /dev/null
+++ b/pkg/store/mongodb/mongodbStore_test.go
@@ -0,0 +1,692 @@
+package mongodb
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"testing"
+
+	"github.com/teramoby/speedle-plus/api/pms"
+	"github.com/teramoby/speedle-plus/pkg/cfg"
+	"github.com/teramoby/speedle-plus/pkg/store"
+)
+
+var storeConfig *cfg.StoreConfig
+
+func TestMain(m *testing.M) {
+	os.Exit(testMain(m))
+}
+
+func testMain(m *testing.M) int {
+	var err error
+	storeConfig, err = cfg.ReadStoreConfig("./mongoStoreConfig.json")
+	if err != nil {
+		log.Fatal("fail to read config file:", err)
+	}
+	fmt.Println(storeConfig)
+	return m.Run()
+}
+
+func TestWriteReadPolicyStore(t *testing.T) {
+	store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps)
+	if err != nil {
+		t.Fatal("fail to create mongodb store:", err)
+	}
+
+	if psOrigin, err := store.ReadPolicyStore(); err != nil {
+		t.Fatal("fail to read mongodb store:", err)
+	} else {
+		t.Log("existing number of apps:", len(psOrigin.Services))
+	}
+
+	var ps pms.PolicyStore
+	for i := 0; i < 10; i++ {
+		service := pms.Service{Name: fmt.Sprintf("app%d", i), Type: pms.TypeApplication}
+		ps.Services = append(ps.Services, &service)
+	}
+	err = store.WritePolicyStore(&ps)
+	if err != nil {
+		t.Fatal("fail to write policy store:", err)
+	}
+	var psr *pms.PolicyStore
+	psr, err = store.ReadPolicyStore()
+	if err != nil {
+		t.Fatal("fail to read policy store:", err)
+	}
+	if len(psr.Services) != 10 {
+		t.Error("should have 10 applications in the store")
+	}
+	for _, app := range psr.Services {
+		t.Log(app.Name, " ")
+	}
+}
+
+func TestWriteReadDeleteService(t *testing.T) {
+	store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps)
+	if err != nil {
+		t.Fatal("fail to create mongodb store:", err)
+	}
+	// clean up the service first
+	err = store.DeleteService("service1")
+
+	app := pms.Service{Name: "service1", Type: pms.TypeApplication}
+	num := 100
+	i := 0
+	for i < num {
+		var rolePolicy pms.RolePolicy
+		rolePolicy.Name = fmt.Sprintf("rp%d", i)
+		rolePolicy.Effect = "grant"
+		rolePolicy.Roles = []string{fmt.Sprintf("role%d", i)}
+		rolePolicy.Principals = []string{"user:Alice"}
+		app.RolePolicies = append(app.RolePolicies, &rolePolicy)
+		i++
+	}
+	i = 0
+	for i < num {
+		var policy pms.Policy
+		policy.Name = fmt.Sprintf("policy%d", i)
+		policy.Effect = "grant"
+		policy.Permissions = []*pms.Permission{
+			{
+				Resource: "/node1",
+				Actions:  []string{"get", "create", "delete"},
+			},
+		}
+		policy.Principals = [][]string{{"user:Alice"}}
+		app.Policies = append(app.Policies, &policy)
+		i++
+	}
+	err = store.CreateService(&app)
+	if err != nil {
+		t.Log("fail to create application:", err)
+		t.FailNow()
+	}
+	appr, errr := store.GetService("service1")
+	if errr != nil {
+		t.Log("fail to get application:", errr)
+		t.FailNow()
+	}
+	if appr.Name != "service1" {
+		t.Log("app name should be service1")
+		t.FailNow()
+	}
+	if appr.Type != pms.TypeApplication {
+		t.Log("app type should be ", pms.TypeApplication)
+		t.FailNow()
+	}
+	if len(appr.RolePolicies) != num {
+		t.Logf("role policy number should be %d, but %d.", num, len(appr.RolePolicies))
+		t.FailNow()
+	}
+	if len(appr.Policies) != num {
+		t.Log("policy number should be ", num)
+		t.FailNow()
+	}
+	// test creating a service with a duplicate name
+	sameNameApp := pms.Service{Name: "service1", Type: pms.TypeApplication}
+	err = store.CreateService(&sameNameApp)
+	if err != nil {
+		fmt.Println("SAME NAME APP CREATION ERR", err)
+	}
+
+	err = store.DeleteService("service1")
+	if err != nil {
+		t.Log("fail to delete application:", err)
+		t.FailNow()
+	}
+	appr, err = store.GetService("service1")
+	t.Log("get non exist service:", err)
+	if err == nil {
+		t.Log("should fail as app is already deleted")
+		t.FailNow()
+	}
+	err = store.DeleteService("nonexist-service")
+	t.Log("delete non exist service:", err)
+	if err == nil {
+		t.Log("should fail as the service does not exist")
+		t.FailNow()
+	}
+}
+
+func TestMongoStore_GetPolicyByName(t *testing.T) {
+	store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps)
+	if err != nil {
+		t.Fatal("fail to create mongodb store:", err)
+	}
+	// clean up the service first
+	serviceName := "service1"
+
+	err = store.DeleteService(serviceName)
+	if err != nil {
+		t.Log("deleting service1, err:", err)
+	}
+
+	app := pms.Service{Name: serviceName, Type: pms.TypeApplication}
+	num := 10
+	i := 0
+	for i < num {
+		var policy pms.Policy
+		policy.Name = fmt.Sprintf("policy%d", i)
+		policy.Effect = "grant"
+		policy.Permissions = []*pms.Permission{
+			{
+				Resource: "/node1",
+				Actions:  []string{"get", "create", "delete"},
+			},
+		}
+		policy.Principals = [][]string{{"user:Alice"}}
+		app.Policies = append(app.Policies, &policy)
+		i++
+	}
+	blankNamePolicy := pms.Policy{
+		Effect: "grant",
+		Permissions: []*pms.Permission{
+			{
+				Resource: "/node1",
+				Actions:  []string{"get", "create", "delete"},
+			},
+		},
+		Principals: [][]string{{"user:Alice"}},
+	}
+	app.Policies = append(app.Policies, &blankNamePolicy)
+	duplicateNamePolicy := pms.Policy{
+		Name:   "policy0",
+		Effect: "grant",
+		Permissions: []*pms.Permission{
+			{
+				Resource: "/node1",
+				Actions:  []string{"get", "create", "delete"},
+			},
+		},
+		Principals: [][]string{{"user:Alice"}},
+	}
+	app.Policies = append(app.Policies, &duplicateNamePolicy)
+
+	err = store.CreateService(&app)
+	if err != nil {
+		t.Log("fail to create application:", err)
+		t.FailNow()
+	}
+	service, errr := store.GetService(serviceName)
+	if errr != nil {
+		t.Log("fail to get application:", errr)
+		t.FailNow()
+	}
+	policyName := "policy0"
+
+	policyArrListed, err := store.ListAllPolicies(service.Name, "name eq "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+
+	if len(policyArrListed) != 2 { // 2 "policy0" policies
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name co "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 2 { // 2 "policy0" policies
+		t.Fatal("get policy by name didn't get expected policies!", 2, len(policyArrListed))
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name sw "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 2 { // 2 "policy0" policies
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name gt "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != num-1 { // all policies with name greater than "policy0"
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name ge "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != num+1 { // all policies with name greater than or equal to "policy0"
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name lt "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 1 { // 1 blank-name policy
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name le "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 3 { // 1 blank-name policy and 2 duplicate-name policies
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name le ''")
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 1 { // 1 blank-name policy
+		t.Fatal("get policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllPolicies(service.Name, "name pr")
+	if err != nil {
+		t.Fatal("Failed to list policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != num+1 {
+		t.Fatal("Get non-blank-name policies failed!", num+1, len(policyArrListed))
+	}
+
+}
+
+func TestMongoStore_GetRolePolicyByName(t *testing.T) {
+	store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps)
+	if err != nil {
+		t.Fatal("fail to create mongodb store:", err)
+	}
+	// clean up the service first
+	serviceName := "service1"
+	err = store.DeleteService(serviceName)
+	if err != nil {
+		t.Log("deleting service1, err:", err)
+	}
+
+	app := pms.Service{Name: serviceName, Type: pms.TypeApplication}
+	num := 100
+	i := 0
+	for i < num {
+		var rolePolicy pms.RolePolicy
+		rolePolicy.Name = fmt.Sprintf("rp%d", i)
+		rolePolicy.Effect = "grant"
+		rolePolicy.Roles = []string{fmt.Sprintf("role%d", i)}
+		rolePolicy.Principals = []string{"user:Alice"}
+		app.RolePolicies = append(app.RolePolicies, &rolePolicy)
+		i++
+	}
+	blankNameRolePolicy := pms.RolePolicy{
+		Effect:     "grant",
+		Roles:      []string{fmt.Sprintf("role%d", i)},
+		Principals: []string{"user:Alice"},
+	}
+	app.RolePolicies = append(app.RolePolicies, &blankNameRolePolicy)
+
+	duplicateNameRolePolicy := pms.RolePolicy{
+		Name:       "rp0",
+		Effect:     "grant",
+		Roles:      []string{fmt.Sprintf("role%d", i)},
+		Principals: []string{"user:Alice"},
+	}
+	app.RolePolicies = append(app.RolePolicies, &duplicateNameRolePolicy)
+
+	err = store.CreateService(&app)
+	if err != nil {
+		t.Log("fail to create application:", err)
+		t.FailNow()
+	}
+	service, errr := store.GetService(serviceName)
+	if errr != nil {
+		t.Log("fail to get application:", errr)
+		t.FailNow()
+	}
+	policyName := "rp0"
+
+	policyArrListed, err := store.ListAllRolePolicies(service.Name, "name eq "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+
+	if len(policyArrListed) != 2 { // 2 "rp0" role policies
+		t.Fatal("get role policy by name didn't get expected policies!", 2, len(policyArrListed))
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name co "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 2 { // 2 "rp0" role policies
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name sw "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 2 { // 2 "rp0" role policies
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name gt "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != num-1 { // all role policies with name greater than "rp0"
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name ge "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != num+1 { // all role policies with name greater than or equal to "rp0"
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name lt "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 1 { // 1 blank-name role policy
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name le "+policyName)
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 3 { // 1 blank-name role policy and 2 duplicate-name role policies
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name le ''")
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != 1 { // 1 blank-name role policy
+		t.Fatal("get role policy by name didn't get expected policies!")
+	}
+
+	policyArrListed, err = store.ListAllRolePolicies(service.Name, "name pr")
+	if err != nil {
+		t.Fatal("Failed to list role policies for service:", service.Name, err)
+	}
+	if len(policyArrListed) != num+1 {
+		t.Fatal("Get non-blank-name role policies failed!")
+	}
+
+}
+
+func TestManagePolicies(t *testing.T) {
+	store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps)
+	if err != nil {
+		t.Fatal("fail to create mongodb store:", err)
+	}
+	// clean up the service first
+	store.DeleteService("service1")
+	app := pms.Service{Name: "service1", Type: pms.TypeApplication}
+	err = store.CreateService(&app)
+	if err != nil {
+		t.Fatal("fail to create application:", err)
+	}
+	var policy pms.Policy
+	policy.Name = "policy1"
+	policy.Effect = "grant"
+	policy.Permissions = []*pms.Permission{
+		{
+			Resource: "/node1",
+			Actions:  []string{"get", "create", "delete"},
+		},
+	}
+	policy.Principals = [][]string{{"user:Alice"}}
+	policyR, err := store.CreatePolicy("service1", &policy)
+	if err != nil {
+		t.Fatal("fail to create policy:", err)
+	}
+	policyR1, err := store.GetPolicy("service1", policyR.ID)
+	t.Log(policyR1)
+	if err != nil {
+		t.Fatal("fail to get policy:", err)
+	}
+
+	policies, err := store.ListAllPolicies("service1", "")
+	if err != nil {
+		t.Fatal("fail to list policies:", err)
+	}
+	if len(policies) != 1 {
+		t.Fatal("should have 1 policy")
+	}
+	counts, err := store.GetPolicyAndRolePolicyCounts()
+	if err != nil {
+		t.Fatal("Fail to getCounts", err)
+	}
+	if counts["service1"].PolicyCount != 1 {
+		t.Fatal("incorrect policy number")
+	}
+	if counts["service1"].RolePolicyCount != 0 {
+		t.Fatal("incorrect role policy number")
+	}
+
+	_, err = store.GetPolicy("service1", "nonexistID")
+	t.Log(err)
+	if err == nil {
+		t.Fatal("should fail to get policy")
+	}
+
+	err = store.DeletePolicy("service1", "nonexistID")
+	t.Log(err)
+	if err == nil {
+		t.Fatal("should fail to delete policy")
+	}
+
+	err = store.DeletePolicy("service1", policyR.ID)
+	if err != nil {
+		t.Fatal("fail to delete policy:", err)
+	}
+}
+
+func TestManageRolePolicies(t *testing.T) {
+	store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps)
+	if err != nil {
+		t.Fatal("fail to create mongodb store:", err)
+	}
+
+	// clean up the service first
+	store.DeleteService("service1")
+	app := pms.Service{Name: "service1", Type: pms.TypeApplication}
+	err = store.CreateService(&app)
+	if err != nil {
+		t.Fatal("fail to create application:", err)
+	}
+	var rolePolicy pms.RolePolicy
+	rolePolicy.Name = "rp1"
+	rolePolicy.Effect = "grant"
+	rolePolicy.Roles = []string{"role1"}
+	rolePolicy.Principals = []string{"user:Alice"}
+
+	policyR, err := store.CreateRolePolicy("service1", &rolePolicy)
+	if err != nil {
+		t.Fatal("fail to create role policy:", err)
+	}
+	policyR1, err := store.GetRolePolicy("service1", policyR.ID)
+	t.Log(policyR1)
+	if err != nil {
+		t.Fatal("fail to get role policy:", err)
+	}
+
+	rolePolicies, err := store.ListAllRolePolicies("service1", "")
+	if err != nil {
+		t.Fatal("fail to list role policies:", err)
+	}
+	if len(rolePolicies) != 1 {
+		t.Fatal("should have 1 role policy")
+	}
+
+	counts, err := store.GetPolicyAndRolePolicyCounts()
+	if err != nil {
+		t.Fatal("Fail to getCounts", err)
+	}
+	if counts["service1"].PolicyCount != 0 {
+		t.Fatal("incorrect policy number")
+	}
+	if counts["service1"].RolePolicyCount != 1 {
+		t.Fatal("incorrect role policy number")
+	}
+
+	_, err = store.GetRolePolicy("service1", "nonexistID")
+	t.Log(err)
+	if err == nil {
+		t.Fatal("should fail to get role policy")
+	}
+
+	err = store.DeleteRolePolicy("service1", "nonexistID")
+	t.Log(err)
+	if err == nil {
+		t.Fatal("should fail to delete role policy")
+	}
+
+	err = store.DeleteRolePolicy("service1", policyR.ID)
+	if err != nil {
t.Fatal("fail to delete role policy:", err) + } +} + +func TestCheckItemsCount(t *testing.T) { + store, err := store.NewStore(storeConfig.StoreType, storeConfig.StoreProps) + if err != nil { + t.Fatal("fail to new mongodb store:", err) + } + + // clean the services + store.DeleteServices() + + // Create service1 + app1 := pms.Service{Name: "service1", Type: pms.TypeApplication} + err = store.CreateService(&app1) + if err != nil { + t.Fatal("fail to create service:", err) + } + // Check service count + serviceCount, err := store.GetServiceCount() + if err != nil { + t.Fatal("Failed to get service count:", err) + } + if serviceCount != 1 { + t.Fatalf("Service count doesn't match, expected: 1, actual: %d", serviceCount) + } + + // Create policies + policies := []pms.Policy{ + {Name: "p01", Effect: "grant", Principals: [][]string{{"user:user1"}}}, + {Name: "p02", Effect: "grant", Principals: [][]string{{"user:user2"}}}, + {Name: "p03", Effect: "grant", Principals: [][]string{{"user:user3"}}}, + } + for _, policy := range policies { + _, err := store.CreatePolicy("service1", &policy) + if err != nil { + t.Fatal("fail to create policy:", err) + } + } + // Check policy count + policyCount, err := store.GetPolicyCount("service1") + if err != nil { + t.Fatal("Failed to get the policy count: ", err) + } + if policyCount != int64(len(policies)) { + t.Fatalf("Policy count doesn't match, expected:%d, actual:%d", len(policies), policyCount) + } + + // Create Role Policies + rolePolicies := []pms.RolePolicy{ + {Name: "p01", Effect: "grant", Principals: []string{"user:user1"}, Roles: []string{"role1"}}, + {Name: "p02", Effect: "grant", Principals: []string{"user:user2"}, Roles: []string{"role2"}}, + } + for _, rolePolicy := range rolePolicies { + _, err := store.CreateRolePolicy("service1", &rolePolicy) + if err != nil { + t.Fatal("Failed to get role policy count:", err) + } + } + // Check role Policy count + rolePolicyCount, err := store.GetRolePolicyCount("service1") + if err != nil { + t.Fatal("Failed to get the role policy count") + } + if rolePolicyCount != int64(len(rolePolicies)) { + t.Fatalf("RolePolicy count doesn't match, expected:%d, actual:%d", len(rolePolicies), rolePolicyCount) + } + + // Create service2 + app2 := pms.Service{Name: "service2", Type: pms.TypeApplication} + err = store.CreateService(&app2) + if err != nil { + t.Fatal("fail to create service:", err) + } + // Check service count + serviceCount, err = store.GetServiceCount() + if err != nil { + t.Fatal("Failed to get service count:", err) + } + if serviceCount != 2 { + t.Fatalf("Service count doesn't match, expected: 2, actual: %d", serviceCount) + } + + // Create policies in service2 + for _, policy := range policies { + _, err := store.CreatePolicy("service2", &policy) + if err != nil { + t.Fatal("fail to create policy:", err) + } + } + // Check policy count in service2 + policyCount, err = store.GetPolicyCount("service2") + if err != nil { + t.Fatal("Failed to get the policy count: ", err) + } + if policyCount != int64(len(policies)) { + t.Fatalf("Policy count doesn't match, expected:%d, actual:%d", len(policies), policyCount) + } + // Check policy count in both service1 and service2 + policyCount, err = store.GetPolicyCount("") + if err != nil { + t.Fatal("Failed to get the policy count: ", err) + } + if policyCount != int64(len(policies)*2) { + t.Fatalf("Policy count doesn't match, expected:%d, actual:%d", len(policies)*2, policyCount) + } + + // Create rolePolicy in service2 + for _, rolePolicy := range rolePolicies { + _, 
err := store.CreateRolePolicy("service2", &rolePolicy) + if err != nil { + t.Fatal("Failed to get role policy count:", err) + } + } + // Check role Policy count in service2 + rolePolicyCount, err = store.GetRolePolicyCount("service2") + if err != nil { + t.Fatal("Failed to get the role policy count") + } + if rolePolicyCount != int64(len(rolePolicies)) { + t.Fatalf("RolePolicy count doesn't match, expected:%d, actual:%d", len(rolePolicies), rolePolicyCount) + } + // Check role Policy count in both service1 and service2 + rolePolicyCount, err = store.GetRolePolicyCount("") + if err != nil { + t.Fatal("Failed to get the role policy count") + } + if rolePolicyCount != int64(len(rolePolicies)*2) { + t.Fatalf("RolePolicy count doesn't match, expected:%d, actual:%d", len(rolePolicies)*2, rolePolicyCount) + } + counts, err := store.GetPolicyAndRolePolicyCounts() + if err != nil { + t.Fatal("Fail to getCounts", err) + } + if (counts["service1"].PolicyCount != int64(len(policies))) || + (counts["service2"].PolicyCount != int64(len(policies))) { + t.Fatal("incorrect policy number") + } + if (counts["service1"].RolePolicyCount != int64(len(rolePolicies))) || + (counts["service1"].RolePolicyCount != int64(len(rolePolicies))) { + t.Fatal("incorrect role policy number") + } + fmt.Println(counts) +} diff --git a/pkg/store/mongodb/storeBuiler.go b/pkg/store/mongodb/storeBuiler.go new file mode 100644 index 0000000..23ff6e5 --- /dev/null +++ b/pkg/store/mongodb/storeBuiler.go @@ -0,0 +1,81 @@ +//Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +//Licensed under the Universal Permissive License (UPL) Version 1.0 as shown at http://oss.oracle.com/licenses/upl. + +package mongodb + +import ( + "context" + + "github.com/prometheus/common/log" + "github.com/spf13/pflag" + "github.com/teramoby/speedle-plus/api/pms" + "github.com/teramoby/speedle-plus/pkg/store" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + StoreType = "mongodb" + + //Following are keys of mongodb store properties + MongoURIKey = "MongoURI" + MongoDatabaseNameKey = "MongoDatabase" + + MongoURIFlag = "mongostore_uri" + MongoDatabaseNameFlag = "mongostore_database" + + //default property values + DefaultURI = "mongodb://localhost:27017" + DefaultDatabaseName = "speedleplus" +) + +type MongoStoreBuilder struct{} + +func (msb MongoStoreBuilder) NewStore(config map[string]interface{}) (pms.PolicyStoreManager, error) { + mongoURI, ok := config[MongoURIKey].(string) + if !ok { + mongoURI = DefaultURI + } + + mongoDatabase, ok := config[MongoDatabaseNameKey].(string) + if !ok { + mongoDatabase = DefaultDatabaseName + } + + // Set client options + clientOptions := options.Client().ApplyURI(mongoURI) + + // Connect to MongoDB + client, err := mongo.Connect(context.TODO(), clientOptions) + + if err != nil { + log.Fatal(err) + return nil, err + } + + // Check the connection + err = client.Ping(context.TODO(), nil) + + if err != nil { + log.Fatal(err) + return nil, err + } + + return &Store{client: client, Database: mongoDatabase}, nil + +} + +func (msb MongoStoreBuilder) GetStoreParams() map[string]string { + return map[string]string{ + MongoURIFlag: MongoURIKey, + MongoDatabaseNameFlag: MongoDatabaseNameKey, + } + +} + +func init() { + pflag.String(MongoURIFlag, DefaultURI, "Store config: URI of mongoDB.") + pflag.String(MongoDatabaseNameFlag, DefaultDatabaseName, "Store config: database to store speedle policy data.") + + store.Register(StoreType, MongoStoreBuilder{}) +} diff --git 
a/pkg/svcs/adsgrpc/run_mongodb_test.sh b/pkg/svcs/adsgrpc/run_mongodb_test.sh new file mode 100755 index 0000000..a7c1b13 --- /dev/null +++ b/pkg/svcs/adsgrpc/run_mongodb_test.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +shell_dir=$(dirname $0) + +set -ex +source ${GOPATH}/src/github.com/teramoby/speedle-plus/setTestEnv.sh + +go clean -testcache +#Reconfig spctl +${GOPATH}/bin/spctl config ads-endpoint http://localhost:6734/authz-check/v1/ +${GOPATH}/bin/spctl config pms-endpoint http://localhost:6733/policy-mgmt/v1/ + +startPMS mongodb --config-file ${shell_dir}/../pmsrest/config_mongodb.json +startADS --config-file ${shell_dir}/../pmsrest/config_mongodb.json + +sleep 5 +${GOPATH}/bin/spctl delete service --all +go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/svcs/adsgrpc -run=TestMats diff --git a/pkg/svcs/adsrest/run_mongodb_test.sh b/pkg/svcs/adsrest/run_mongodb_test.sh new file mode 100755 index 0000000..1cc1bee --- /dev/null +++ b/pkg/svcs/adsrest/run_mongodb_test.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +shell_dir=$(dirname $0) + +set -ex +source ${GOPATH}/src/github.com/teramoby/speedle-plus/setTestEnv.sh +go clean -testcache + +#Reconfig spctl +${GOPATH}/bin/spctl config ads-endpoint http://localhost:6734/authz-check/v1/ +${GOPATH}/bin/spctl config pms-endpoint http://localhost:6733/policy-mgmt/v1/ + +startPMS mongodb --config-file ${shell_dir}/../pmsrest/config_mongodb.json +startADS --config-file ${shell_dir}/../pmsrest/config_mongodb.json + +${GOPATH}/bin/spctl delete service --all +go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/svcs/adsrest -tags=runtime_test_prepare +${GOPATH}/bin/spctl get service --all +sleep 2 +go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/svcs/adsrest -tags="runtime_test runtime_cache_test" -run=TestMats + diff --git a/pkg/svcs/pmsgrpc/run_mongodb_test.sh b/pkg/svcs/pmsgrpc/run_mongodb_test.sh new file mode 100755 index 0000000..f761678 --- /dev/null +++ b/pkg/svcs/pmsgrpc/run_mongodb_test.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +shell_dir=$(dirname $0) + +set -ex +source ${GOPATH}/src/github.com/teramoby/speedle-plus/setTestEnv.sh + +go clean -testcache + +#Reconfig spctl +${GOPATH}/bin/spctl config ads-endpoint http://localhost:6734/authz-check/v1/ +${GOPATH}/bin/spctl config pms-endpoint http://localhost:6733/policy-mgmt/v1/ + + +startPMS mongodb --config-file ${shell_dir}/../pmsrest/config_mongodb.json + +sleep 5 +${GOPATH}/bin/spctl delete service --all + +go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/svcs/pmsgrpc -run=TestMats diff --git a/pkg/svcs/pmsrest/config_mongdb_cloud.json b/pkg/svcs/pmsrest/config_mongdb_cloud.json new file mode 100644 index 0000000..486ae38 --- /dev/null +++ b/pkg/svcs/pmsrest/config_mongdb_cloud.json @@ -0,0 +1,10 @@ +{ + "storeConfig": { + "storeType": "mongodb", + "storeProps": { + "MongoURI": "mongodb+srv://speedleplus:Passw0rd@cluster0-wfhda.mongodb.net/speedletest?retryWrites=true&w=majority", + "MongoDatabase": "speedletest" + } + }, + "enableWatch": true +} \ No newline at end of file diff --git a/pkg/svcs/pmsrest/config_mongodb.json b/pkg/svcs/pmsrest/config_mongodb.json new file mode 100644 index 0000000..acd64f3 --- /dev/null +++ b/pkg/svcs/pmsrest/config_mongodb.json @@ -0,0 +1,10 @@ +{ + "storeConfig": { + "storeType": "mongodb", + "storeProps": { + "MongoURI": "mongodb://localhost:27017", + "MongoDatabase": "speedletest" + } + }, + "enableWatch": true +} \ No newline at end of file diff --git a/pkg/svcs/pmsrest/run_mongodb_test.sh b/pkg/svcs/pmsrest/run_mongodb_test.sh new file mode 
100755 index 0000000..d50075a --- /dev/null +++ b/pkg/svcs/pmsrest/run_mongodb_test.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +shell_dir=$(dirname $0) + +set -ex +source ${GOPATH}/src/github.com/teramoby/speedle-plus/setTestEnv.sh + +go clean -testcache + +#Reconfig spctl +${GOPATH}/bin/spctl config ads-endpoint http://localhost:6734/authz-check/v1/ +${GOPATH}/bin/spctl config pms-endpoint http://localhost:6733/policy-mgmt/v1/ + + +startPMS mongodb --config-file ${shell_dir}/config_mongodb.json + +sleep 5 +${GOPATH}/bin/spctl delete service --all +go test ${TEST_OPTS} github.com/teramoby/speedle-plus/pkg/svcs/pmsrest $* diff --git a/setTestEnv.sh b/setTestEnv.sh index 50b6e49..c919099 100644 --- a/setTestEnv.sh +++ b/setTestEnv.sh @@ -111,7 +111,7 @@ function ensureTestDir() { rm -f ${temp_policy_file} echo "{}" > ${temp_policy_file} add_exit_trap "rm -f ${temp_policy_file}" - else + elif [ "$1" == "etcd" ];then rm -rf ./speedle.etcd add_exit_trap "rm -rf ./speedle.etcd" fi diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/alecthomas/template/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go new file mode 100644 index 0000000..223c595 --- /dev/null +++ b/vendor/github.com/alecthomas/template/doc.go @@ -0,0 +1,406 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package template implements data-driven templates for generating textual output. + +To generate HTML output, see package html/template, which has the same interface +as this package but automatically secures HTML output against certain attacks. + +Templates are executed by applying them to a data structure. 
Annotations in the +template refer to elements of the data structure (typically a field of a struct +or a key in a map) to control execution and derive values to be displayed. +Execution of the template walks the structure and sets the cursor, represented +by a period '.' and called "dot", to the value at the current location in the +structure as execution proceeds. + +The input text for a template is UTF-8-encoded text in any format. +"Actions"--data evaluations or control structures--are delimited by +"{{" and "}}"; all text outside actions is copied to the output unchanged. +Actions may not span newlines, although comments can. + +Once parsed, a template may be executed safely in parallel. + +Here is a trivial example that prints "17 items are made of wool". + + type Inventory struct { + Material string + Count uint + } + sweaters := Inventory{"wool", 17} + tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") + if err != nil { panic(err) } + err = tmpl.Execute(os.Stdout, sweaters) + if err != nil { panic(err) } + +More intricate examples appear below. + +Actions + +Here is the list of actions. "Arguments" and "pipelines" are evaluations of +data, defined in detail below. + +*/ +// {{/* a comment */}} +// A comment; discarded. May contain newlines. +// Comments do not nest and must start and end at the +// delimiters, as shown here. +/* + + {{pipeline}} + The default textual representation of the value of the pipeline + is copied to the output. + + {{if pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, T1 is executed. The empty values are false, 0, any + nil pointer or interface value, and any array, slice, map, or + string of length zero. + Dot is unaffected. + + {{if pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, T0 is executed; + otherwise, T1 is executed. Dot is unaffected. + + {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} + To simplify the appearance of if-else chains, the else action + of an if may include another if directly; the effect is exactly + the same as writing + {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} + + {{range pipeline}} T1 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, nothing is output; + otherwise, dot is set to the successive elements of the array, + slice, or map and T1 is executed. If the value is a map and the + keys are of basic type with a defined order ("comparable"), the + elements will be visited in sorted key order. + + {{range pipeline}} T1 {{else}} T0 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, dot is unaffected and + T0 is executed; otherwise, dot is set to the successive elements + of the array, slice, or map and T1 is executed. + + {{template "name"}} + The template with the specified name is executed with nil data. + + {{template "name" pipeline}} + The template with the specified name is executed with dot set + to the value of the pipeline. + + {{with pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, dot is set to the value of the pipeline and T1 is + executed. + + {{with pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, dot is unaffected and T0 + is executed; otherwise, dot is set to the value of the pipeline + and T1 is executed. 
+ +Arguments + +An argument is a simple value, denoted by one of the following. + + - A boolean, string, character, integer, floating-point, imaginary + or complex constant in Go syntax. These behave like Go's untyped + constants, although raw strings may not span newlines. + - The keyword nil, representing an untyped Go nil. + - The character '.' (period): + . + The result is the value of dot. + - A variable name, which is a (possibly empty) alphanumeric string + preceded by a dollar sign, such as + $piOver2 + or + $ + The result is the value of the variable. + Variables are described below. + - The name of a field of the data, which must be a struct, preceded + by a period, such as + .Field + The result is the value of the field. Field invocations may be + chained: + .Field1.Field2 + Fields can also be evaluated on variables, including chaining: + $x.Field1.Field2 + - The name of a key of the data, which must be a map, preceded + by a period, such as + .Key + The result is the map element value indexed by the key. + Key invocations may be chained and combined with fields to any + depth: + .Field1.Key1.Field2.Key2 + Although the key must be an alphanumeric identifier, unlike with + field names they do not need to start with an upper case letter. + Keys can also be evaluated on variables, including chaining: + $x.key1.key2 + - The name of a niladic method of the data, preceded by a period, + such as + .Method + The result is the value of invoking the method with dot as the + receiver, dot.Method(). Such a method must have one return value (of + any type) or two return values, the second of which is an error. + If it has two and the returned error is non-nil, execution terminates + and an error is returned to the caller as the value of Execute. + Method invocations may be chained and combined with fields and keys + to any depth: + .Field1.Key1.Method1.Field2.Key2.Method2 + Methods can also be evaluated on variables, including chaining: + $x.Method1.Field + - The name of a niladic function, such as + fun + The result is the value of invoking the function, fun(). The return + types and values behave as in methods. Functions and function + names are described below. + - A parenthesized instance of one the above, for grouping. The result + may be accessed by a field or map key invocation. + print (.F1 arg1) (.F2 arg2) + (.StructValuedMethod "arg").Field + +Arguments may evaluate to any type; if they are pointers the implementation +automatically indirects to the base type when required. +If an evaluation yields a function value, such as a function-valued +field of a struct, the function is not invoked automatically, but it +can be used as a truth value for an if action and the like. To invoke +it, use the call function, defined below. + +A pipeline is a possibly chained sequence of "commands". A command is a simple +value (argument) or a function or method call, possibly with multiple arguments: + + Argument + The result is the value of evaluating the argument. + .Method [Argument...] + The method can be alone or the last element of a chain but, + unlike methods in the middle of a chain, it can take arguments. + The result is the value of calling the method with the + arguments: + dot.Method(Argument1, etc.) + functionName [Argument...] + The result is the value of calling the function associated + with the name: + function(Argument1, etc.) + Functions and function names are described below. 
+ +Pipelines + +A pipeline may be "chained" by separating a sequence of commands with pipeline +characters '|'. In a chained pipeline, the result of the each command is +passed as the last argument of the following command. The output of the final +command in the pipeline is the value of the pipeline. + +The output of a command will be either one value or two values, the second of +which has type error. If that second value is present and evaluates to +non-nil, execution terminates and the error is returned to the caller of +Execute. + +Variables + +A pipeline inside an action may initialize a variable to capture the result. +The initialization has syntax + + $variable := pipeline + +where $variable is the name of the variable. An action that declares a +variable produces no output. + +If a "range" action initializes a variable, the variable is set to the +successive elements of the iteration. Also, a "range" may declare two +variables, separated by a comma: + + range $index, $element := pipeline + +in which case $index and $element are set to the successive values of the +array/slice index or map key and element, respectively. Note that if there is +only one variable, it is assigned the element; this is opposite to the +convention in Go range clauses. + +A variable's scope extends to the "end" action of the control structure ("if", +"with", or "range") in which it is declared, or to the end of the template if +there is no such control structure. A template invocation does not inherit +variables from the point of its invocation. + +When execution begins, $ is set to the data argument passed to Execute, that is, +to the starting value of dot. + +Examples + +Here are some example one-line templates demonstrating pipelines and variables. +All produce the quoted word "output": + + {{"\"output\""}} + A string constant. + {{`"output"`}} + A raw string constant. + {{printf "%q" "output"}} + A function call. + {{"output" | printf "%q"}} + A function call whose final argument comes from the previous + command. + {{printf "%q" (print "out" "put")}} + A parenthesized argument. + {{"put" | printf "%s%s" "out" | printf "%q"}} + A more elaborate call. + {{"output" | printf "%s" | printf "%q"}} + A longer chain. + {{with "output"}}{{printf "%q" .}}{{end}} + A with action using dot. + {{with $x := "output" | printf "%q"}}{{$x}}{{end}} + A with action that creates and uses a variable. + {{with $x := "output"}}{{printf "%q" $x}}{{end}} + A with action that uses the variable in another action. + {{with $x := "output"}}{{$x | printf "%q"}}{{end}} + The same, but pipelined. + +Functions + +During execution functions are found in two function maps: first in the +template, then in the global function map. By default, no functions are defined +in the template but the Funcs method can be used to add them. + +Predefined global functions are named as follows. + + and + Returns the boolean AND of its arguments by returning the + first empty argument or the last argument, that is, + "and x y" behaves as "if x then y else x". All the + arguments are evaluated. + call + Returns the result of calling the first argument, which + must be a function, with the remaining arguments as parameters. + Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where + Y is a func-valued field, map entry, or the like. + The first argument must be the result of an evaluation + that yields a value of function type (as distinct from + a predefined function such as print). 
The function must + return either one or two result values, the second of which + is of type error. If the arguments don't match the function + or the returned error value is non-nil, execution stops. + html + Returns the escaped HTML equivalent of the textual + representation of its arguments. + index + Returns the result of indexing its first argument by the + following arguments. Thus "index x 1 2 3" is, in Go syntax, + x[1][2][3]. Each indexed item must be a map, slice, or array. + js + Returns the escaped JavaScript equivalent of the textual + representation of its arguments. + len + Returns the integer length of its argument. + not + Returns the boolean negation of its single argument. + or + Returns the boolean OR of its arguments by returning the + first non-empty argument or the last argument, that is, + "or x y" behaves as "if x then x else y". All the + arguments are evaluated. + print + An alias for fmt.Sprint + printf + An alias for fmt.Sprintf + println + An alias for fmt.Sprintln + urlquery + Returns the escaped value of the textual representation of + its arguments in a form suitable for embedding in a URL query. + +The boolean functions take any zero value to be false and a non-zero +value to be true. + +There is also a set of binary comparison operators defined as +functions: + + eq + Returns the boolean truth of arg1 == arg2 + ne + Returns the boolean truth of arg1 != arg2 + lt + Returns the boolean truth of arg1 < arg2 + le + Returns the boolean truth of arg1 <= arg2 + gt + Returns the boolean truth of arg1 > arg2 + ge + Returns the boolean truth of arg1 >= arg2 + +For simpler multi-way equality tests, eq (only) accepts two or more +arguments and compares the second and subsequent to the first, +returning in effect + + arg1==arg2 || arg1==arg3 || arg1==arg4 ... + +(Unlike with || in Go, however, eq is a function call and all the +arguments will be evaluated.) + +The comparison functions work on basic types only (or named basic +types, such as "type Celsius float32"). They implement the Go rules +for comparison of values, except that size and exact type are +ignored, so any integer value, signed or unsigned, may be compared +with any other integer value. (The arithmetic value is compared, +not the bit pattern, so all negative integers are less than all +unsigned integers.) However, as usual, one may not compare an int +with a float32 and so on. + +Associated templates + +Each template is named by a string specified when it is created. Also, each +template is associated with zero or more other templates that it may invoke by +name; such associations are transitive and form a name space of templates. + +A template may use a template invocation to instantiate another associated +template; see the explanation of the "template" action above. The name must be +that of a template associated with the template that contains the invocation. + +Nested template definitions + +When parsing a template, another template may be defined and associated with the +template being parsed. Template definitions must appear at the top level of the +template, much like global variables in a Go program. + +The syntax of such definitions is to surround each template declaration with a +"define" and "end" action. + +The define action names the template being created by providing a string +constant. 
Here is a simple example: + + `{{define "T1"}}ONE{{end}} + {{define "T2"}}TWO{{end}} + {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} + {{template "T3"}}` + +This defines two templates, T1 and T2, and a third T3 that invokes the other two +when it is executed. Finally it invokes T3. If executed this template will +produce the text + + ONE TWO + +By construction, a template may reside in only one association. If it's +necessary to have a template addressable from multiple associations, the +template definition must be parsed multiple times to create distinct *Template +values, or must be copied with the Clone or AddParseTree method. + +Parse may be called multiple times to assemble the various associated templates; +see the ParseFiles and ParseGlob functions and methods for simple ways to parse +related templates stored in files. + +A template may be executed directly or through ExecuteTemplate, which executes +an associated template identified by name. To invoke our example above, we +might write, + + err := tmpl.Execute(os.Stdout, "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +or to invoke a particular template explicitly by name, + + err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +*/ +package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go new file mode 100644 index 0000000..c3078e5 --- /dev/null +++ b/vendor/github.com/alecthomas/template/exec.go @@ -0,0 +1,845 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/alecthomas/template/parse" +) + +// state represents the state of an execution. It's not part of the +// template so that multiple executions of the same template +// can execute in parallel. +type state struct { + tmpl *Template + wr io.Writer + node parse.Node // current node, for errors + vars []variable // push-down stack of variable values. +} + +// variable holds the dynamic value of a variable such as $, $x etc. +type variable struct { + name string + value reflect.Value +} + +// push pushes a new variable on the stack. +func (s *state) push(name string, value reflect.Value) { + s.vars = append(s.vars, variable{name, value}) +} + +// mark returns the length of the variable stack. +func (s *state) mark() int { + return len(s.vars) +} + +// pop pops the variable stack up to the mark. +func (s *state) pop(mark int) { + s.vars = s.vars[0:mark] +} + +// setVar overwrites the top-nth variable on the stack. Used by range iterations. +func (s *state) setVar(n int, value reflect.Value) { + s.vars[len(s.vars)-n].value = value +} + +// varValue returns the value of the named variable. +func (s *state) varValue(name string) reflect.Value { + for i := s.mark() - 1; i >= 0; i-- { + if s.vars[i].name == name { + return s.vars[i].value + } + } + s.errorf("undefined variable: %s", name) + return zero +} + +var zero reflect.Value + +// at marks the state to be on node n, for error reporting. +func (s *state) at(node parse.Node) { + s.node = node +} + +// doublePercent returns the string with %'s replaced by %%, if necessary, +// so it can be used safely inside a Printf format string. 
+func doublePercent(str string) string { + if strings.Contains(str, "%") { + str = strings.Replace(str, "%", "%%", -1) + } + return str +} + +// errorf formats the error and terminates processing. +func (s *state) errorf(format string, args ...interface{}) { + name := doublePercent(s.tmpl.Name()) + if s.node == nil { + format = fmt.Sprintf("template: %s: %s", name, format) + } else { + location, context := s.tmpl.ErrorContext(s.node) + format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) + } + panic(fmt.Errorf(format, args...)) +} + +// errRecover is the handler that turns panics into returns from the top +// level of Parse. +func errRecover(errp *error) { + e := recover() + if e != nil { + switch err := e.(type) { + case runtime.Error: + panic(e) + case error: + *errp = err + default: + panic(e) + } + } +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.tmpl[name] + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.name) + } + return tmpl.Execute(wr, data) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + defer errRecover(&err) + value := reflect.ValueOf(data) + state := &state{ + tmpl: t, + wr: wr, + vars: []variable{{"$", value}}, + } + t.init() + if t.Tree == nil || t.Root == nil { + var b bytes.Buffer + for name, tmpl := range t.tmpl { + if tmpl.Tree == nil || tmpl.Root == nil { + continue + } + if b.Len() > 0 { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%q", name) + } + var s string + if b.Len() > 0 { + s = "; defined templates are: " + b.String() + } + state.errorf("%q is an incomplete or empty template%s", t.Name(), s) + } + state.walk(value, t.Root) + return +} + +// Walk functions step through the major pieces of the template structure, +// generating output as they go. +func (s *state) walk(dot reflect.Value, node parse.Node) { + s.at(node) + switch node := node.(type) { + case *parse.ActionNode: + // Do not pop variables so they persist until next end. + // Also, if the action declares variables, don't print the result. + val := s.evalPipeline(dot, node.Pipe) + if len(node.Pipe.Decl) == 0 { + s.printValue(node, val) + } + case *parse.IfNode: + s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) + case *parse.ListNode: + for _, node := range node.Nodes { + s.walk(dot, node) + } + case *parse.RangeNode: + s.walkRange(dot, node) + case *parse.TemplateNode: + s.walkTemplate(dot, node) + case *parse.TextNode: + if _, err := s.wr.Write(node.Text); err != nil { + s.errorf("%s", err) + } + case *parse.WithNode: + s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) + default: + s.errorf("unknown node: %s", node) + } +} + +// walkIfOrWith walks an 'if' or 'with' node. 
The two control structures +// are identical in behavior except that 'with' sets dot. +func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { + defer s.pop(s.mark()) + val := s.evalPipeline(dot, pipe) + truth, ok := isTrue(val) + if !ok { + s.errorf("if/with can't use %v", val) + } + if truth { + if typ == parse.NodeWith { + s.walk(val, list) + } else { + s.walk(dot, list) + } + } else if elseList != nil { + s.walk(dot, elseList) + } +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} + +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { + s.at(r) + defer s.pop(s.mark()) + val, _ := indirect(s.evalPipeline(dot, r.Pipe)) + // mark top of stack before any variables in the body are pushed. + mark := s.mark() + oneIteration := func(index, elem reflect.Value) { + // Set top var (lexically the second if there are two) to the element. + if len(r.Pipe.Decl) > 0 { + s.setVar(1, elem) + } + // Set next var (lexically the first if there are two) to the index. + if len(r.Pipe.Decl) > 1 { + s.setVar(2, index) + } + s.walk(elem, r.List) + s.pop(mark) + } + switch val.Kind() { + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + break + } + for i := 0; i < val.Len(); i++ { + oneIteration(reflect.ValueOf(i), val.Index(i)) + } + return + case reflect.Map: + if val.Len() == 0 { + break + } + for _, key := range sortKeys(val.MapKeys()) { + oneIteration(key, val.MapIndex(key)) + } + return + case reflect.Chan: + if val.IsNil() { + break + } + i := 0 + for ; ; i++ { + elem, ok := val.Recv() + if !ok { + break + } + oneIteration(reflect.ValueOf(i), elem) + } + if i == 0 { + break + } + return + case reflect.Invalid: + break // An invalid value is likely a nil map, etc. and acts like an empty map. + default: + s.errorf("range can't iterate over %v", val) + } + if r.ElseList != nil { + s.walk(dot, r.ElseList) + } +} + +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { + s.at(t) + tmpl := s.tmpl.tmpl[t.Name] + if tmpl == nil { + s.errorf("template %q not defined", t.Name) + } + // Variables declared by the pipeline persist. + dot = s.evalPipeline(dot, t.Pipe) + newState := *s + newState.tmpl = tmpl + // No dynamic scoping: template invocations inherit no variables. + newState.vars = []variable{{"$", dot}} + newState.walk(dot, tmpl.Root) +} + +// Eval functions evaluate pipelines, commands, and their elements and extract +// values from the data structure by examining fields, calling methods, and so on. 
+// The printing of those values happens only through walk functions. + +// evalPipeline returns the value acquired by evaluating a pipeline. If the +// pipeline has a variable declaration, the variable will be pushed on the +// stack. Callers should therefore pop the stack after they are finished +// executing commands depending on the pipeline value. +func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { + if pipe == nil { + return + } + s.at(pipe) + for _, cmd := range pipe.Cmds { + value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. + // If the object has type interface{}, dig down one level to the thing inside. + if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { + value = reflect.ValueOf(value.Interface()) // lovely! + } + } + for _, variable := range pipe.Decl { + s.push(variable.Ident[0], value) + } + return value +} + +func (s *state) notAFunction(args []parse.Node, final reflect.Value) { + if len(args) > 1 || final.IsValid() { + s.errorf("can't give argument to non-function %s", args[0]) + } +} + +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { + firstWord := cmd.Args[0] + switch n := firstWord.(type) { + case *parse.FieldNode: + return s.evalFieldNode(dot, n, cmd.Args, final) + case *parse.ChainNode: + return s.evalChainNode(dot, n, cmd.Args, final) + case *parse.IdentifierNode: + // Must be a function. + return s.evalFunction(dot, n, cmd, cmd.Args, final) + case *parse.PipeNode: + // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. + return s.evalPipeline(dot, n) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, cmd.Args, final) + } + s.at(firstWord) + s.notAFunction(cmd.Args, final) + switch word := firstWord.(type) { + case *parse.BoolNode: + return reflect.ValueOf(word.True) + case *parse.DotNode: + return dot + case *parse.NilNode: + s.errorf("nil is not a command") + case *parse.NumberNode: + return s.idealConstant(word) + case *parse.StringNode: + return reflect.ValueOf(word.Text) + } + s.errorf("can't evaluate command %q", firstWord) + panic("not reached") +} + +// idealConstant is called to return the value of a number in a context where +// we don't know the type. In that case, the syntax of the number tells us +// its type, and we use Go rules to resolve. Note there is no such thing as +// a uint ideal constant in this situation - the value must be of int type. +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { + // These are ideal constants but we don't know the type + // and we have no context. (If it was a method argument, + // we'd know what we need.) The syntax guides us to some extent. + s.at(constant) + switch { + case constant.IsComplex: + return reflect.ValueOf(constant.Complex128) // incontrovertible. 
+ case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: + return reflect.ValueOf(constant.Float64) + case constant.IsInt: + n := int(constant.Int64) + if int64(n) != constant.Int64 { + s.errorf("%s overflows int", constant.Text) + } + return reflect.ValueOf(n) + case constant.IsUint: + s.errorf("%s overflows int", constant.Text) + } + return zero +} + +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(field) + return s.evalFieldChain(dot, dot, field, field.Ident, args, final) +} + +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(chain) + // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. + pipe := s.evalArg(dot, nil, chain.Node) + if len(chain.Field) == 0 { + s.errorf("internal error: no fields in evalChainNode") + } + return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) +} + +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { + // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. + s.at(variable) + value := s.varValue(variable.Ident[0]) + if len(variable.Ident) == 1 { + s.notAFunction(args, final) + return value + } + return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) +} + +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. +// dot is the environment in which to evaluate arguments, while +// receiver is the value being walked along the chain. +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { + n := len(ident) + for i := 0; i < n-1; i++ { + receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) + } + // Now if it's a method, it gets the arguments. + return s.evalField(dot, ident[n-1], node, args, final, receiver) +} + +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { + s.at(node) + name := node.Ident + function, ok := findFunction(name, s.tmpl) + if !ok { + s.errorf("%q is not a defined function", name) + } + return s.evalCall(dot, function, cmd, name, args, final) +} + +// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). +// The 'final' argument represents the return value from the preceding +// value of the pipeline, if any. +func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { + if !receiver.IsValid() { + return zero + } + typ := receiver.Type() + receiver, _ = indirect(receiver) + // Unless it's an interface, need to get to a value of type *T to guarantee + // we see all methods of T and *T. + ptr := receiver + if ptr.Kind() != reflect.Interface && ptr.CanAddr() { + ptr = ptr.Addr() + } + if method := ptr.MethodByName(fieldName); method.IsValid() { + return s.evalCall(dot, method, node, fieldName, args, final) + } + hasArgs := len(args) > 1 || final.IsValid() + // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. 
+ receiver, isNil := indirect(receiver) + if isNil { + s.errorf("nil pointer evaluating %s.%s", typ, fieldName) + } + switch receiver.Kind() { + case reflect.Struct: + tField, ok := receiver.Type().FieldByName(fieldName) + if ok { + field := receiver.FieldByIndex(tField.Index) + if tField.PkgPath != "" { // field is unexported + s.errorf("%s is an unexported field of struct type %s", fieldName, typ) + } + // If it's a function, we must call it. + if hasArgs { + s.errorf("%s has arguments but cannot be invoked as function", fieldName) + } + return field + } + s.errorf("%s is not a field of struct type %s", fieldName, typ) + case reflect.Map: + // If it's a map, attempt to use the field name as a key. + nameVal := reflect.ValueOf(fieldName) + if nameVal.Type().AssignableTo(receiver.Type().Key()) { + if hasArgs { + s.errorf("%s is not a method but has arguments", fieldName) + } + return receiver.MapIndex(nameVal) + } + } + s.errorf("can't evaluate field %s in type %s", fieldName, typ) + panic("not reached") +} + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so +// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] +// as the function itself. +func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { + if args != nil { + args = args[1:] // Zeroth arg is function name/node; not passed to function. + } + typ := fun.Type() + numIn := len(args) + if final.IsValid() { + numIn++ + } + numFixed := len(args) + if typ.IsVariadic() { + numFixed = typ.NumIn() - 1 // last arg is the variadic one. + if numIn < numFixed { + s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) + } + } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { + s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) + } + if !goodFunc(typ) { + // TODO: This could still be a confusing error; maybe goodFunc should provide info. + s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) + } + // Build the arg list. + argv := make([]reflect.Value, numIn) + // Args must be evaluated. Fixed args first. + i := 0 + for ; i < numFixed && i < len(args); i++ { + argv[i] = s.evalArg(dot, typ.In(i), args[i]) + } + // Now the ... args. + if typ.IsVariadic() { + argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. + for ; i < len(args); i++ { + argv[i] = s.evalArg(dot, argType, args[i]) + } + } + // Add final value if necessary. + if final.IsValid() { + t := typ.In(typ.NumIn() - 1) + if typ.IsVariadic() { + t = t.Elem() + } + argv[i] = s.validateType(final, t) + } + result := fun.Call(argv) + // If we have an error that is not nil, stop execution and return that error to the caller. + if len(result) == 2 && !result[1].IsNil() { + s.at(node) + s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + } + return result[0] +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// validateType guarantees that the value is valid and assignable to the type. 
+func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { + if !value.IsValid() { + if typ == nil || canBeNil(typ) { + // An untyped nil interface{}. Accept as a proper nil value. + return reflect.Zero(typ) + } + s.errorf("invalid value; expected %s", typ) + } + if typ != nil && !value.Type().AssignableTo(typ) { + if value.Kind() == reflect.Interface && !value.IsNil() { + value = value.Elem() + if value.Type().AssignableTo(typ) { + return value + } + // fallthrough + } + // Does one dereference or indirection work? We could do more, as we + // do with method receivers, but that gets messy and method receivers + // are much more constrained, so it makes more sense there than here. + // Besides, one is almost always all you need. + switch { + case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): + value = value.Elem() + if !value.IsValid() { + s.errorf("dereference of nil pointer of type %s", typ) + } + case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): + value = value.Addr() + default: + s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) + } + } + return value +} + +func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + switch arg := n.(type) { + case *parse.DotNode: + return s.validateType(dot, typ) + case *parse.NilNode: + if canBeNil(typ) { + return reflect.Zero(typ) + } + s.errorf("cannot assign nil to %s", typ) + case *parse.FieldNode: + return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) + case *parse.VariableNode: + return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) + case *parse.PipeNode: + return s.validateType(s.evalPipeline(dot, arg), typ) + case *parse.IdentifierNode: + return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) + } + switch typ.Kind() { + case reflect.Bool: + return s.evalBool(typ, n) + case reflect.Complex64, reflect.Complex128: + return s.evalComplex(typ, n) + case reflect.Float32, reflect.Float64: + return s.evalFloat(typ, n) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return s.evalInteger(typ, n) + case reflect.Interface: + if typ.NumMethod() == 0 { + return s.evalEmptyInterface(dot, n) + } + case reflect.String: + return s.evalString(typ, n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return s.evalUnsignedInteger(typ, n) + } + s.errorf("can't handle %s for arg of type %s", n, typ) + panic("not reached") +} + +func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.BoolNode); ok { + value := reflect.New(typ).Elem() + value.SetBool(n.True) + return value + } + s.errorf("expected bool; found %s", n) + panic("not reached") +} + +func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.StringNode); ok { + value := reflect.New(typ).Elem() + value.SetString(n.Text) + return value + } + s.errorf("expected string; found %s", n) + panic("not reached") +} + +func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsInt { + value := reflect.New(typ).Elem() + value.SetInt(n.Int64) + return value + } + s.errorf("expected integer; found %s", n) + panic("not reached") +} + +func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) 
reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsUint { + value := reflect.New(typ).Elem() + value.SetUint(n.Uint64) + return value + } + s.errorf("expected unsigned integer; found %s", n) + panic("not reached") +} + +func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { + value := reflect.New(typ).Elem() + value.SetFloat(n.Float64) + return value + } + s.errorf("expected float; found %s", n) + panic("not reached") +} + +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { + if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { + value := reflect.New(typ).Elem() + value.SetComplex(n.Complex128) + return value + } + s.errorf("expected complex; found %s", n) + panic("not reached") +} + +func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { + s.at(n) + switch n := n.(type) { + case *parse.BoolNode: + return reflect.ValueOf(n.True) + case *parse.DotNode: + return dot + case *parse.FieldNode: + return s.evalFieldNode(dot, n, nil, zero) + case *parse.IdentifierNode: + return s.evalFunction(dot, n, n, nil, zero) + case *parse.NilNode: + // NilNode is handled in evalArg, the only place that calls here. + s.errorf("evalEmptyInterface: nil (can't happen)") + case *parse.NumberNode: + return s.idealConstant(n) + case *parse.StringNode: + return reflect.ValueOf(n.Text) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, nil, zero) + case *parse.PipeNode: + return s.evalPipeline(dot, n) + } + s.errorf("can't handle assignment of %s to empty interface argument", n) + panic("not reached") +} + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printValue writes the textual representation of the value to the output of +// the template. +func (s *state) printValue(n parse.Node, v reflect.Value) { + s.at(n) + iface, ok := printableValue(v) + if !ok { + s.errorf("can't print %s of type %s", n, v.Type()) + } + fmt.Fprint(s.wr, iface) +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// Types to help sort the keys in a map for reproducible output. 
+ +type rvs []reflect.Value + +func (x rvs) Len() int { return len(x) } +func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +type rvInts struct{ rvs } + +func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } + +type rvUints struct{ rvs } + +func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } + +type rvFloats struct{ rvs } + +func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } + +type rvStrings struct{ rvs } + +func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } + +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. +func sortKeys(v []reflect.Value) []reflect.Value { + if len(v) <= 1 { + return v + } + switch v[0].Kind() { + case reflect.Float32, reflect.Float64: + sort.Sort(rvFloats{v}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(rvInts{v}) + case reflect.String: + sort.Sort(rvStrings{v}) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + sort.Sort(rvUints{v}) + } + return v +} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go new file mode 100644 index 0000000..39ee5ed --- /dev/null +++ b/vendor/github.com/alecthomas/template/funcs.go @@ -0,0 +1,598 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// addFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. 
+func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string, tmpl *Template) (reflect.Value, bool) { + if tmpl != nil && tmpl.common != nil { + if fn := tmpl.execFuncs[name]; fn.IsValid() { + return fn, true + } + } + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. 
Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. 
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
+			default:
+				return false, errBadComparison
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = v1.Bool() == v2.Bool()
+			case complexKind:
+				truth = v1.Complex() == v2.Complex()
+			case floatKind:
+				truth = v1.Float() == v2.Float()
+			case intKind:
+				truth = v1.Int() == v2.Int()
+			case stringKind:
+				truth = v1.String() == v2.String()
+			case uintKind:
+				truth = v1.Uint() == v2.Uint()
+			default:
+				panic("invalid kind")
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 interface{}) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := eq(arg1, arg2)
+	return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+ if strings.IndexAny(s, `'"&<>`) < 0 { + return s + } + var b bytes.Buffer + HTMLEscape(&b, []byte(s)) + return b.String() +} + +// HTMLEscaper returns the escaped HTML equivalent of the textual +// representation of its arguments. +func HTMLEscaper(args ...interface{}) string { + return HTMLEscapeString(evalArgs(args)) +} + +// JavaScript escaping. + +var ( + jsLowUni = []byte(`\u00`) + hex = []byte("0123456789ABCDEF") + + jsBackslash = []byte(`\\`) + jsApos = []byte(`\'`) + jsQuot = []byte(`\"`) + jsLt = []byte(`\x3C`) + jsGt = []byte(`\x3E`) +) + +// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. +func JSEscape(w io.Writer, b []byte) { + last := 0 + for i := 0; i < len(b); i++ { + c := b[i] + + if !jsIsSpecial(rune(c)) { + // fast path: nothing to do + continue + } + w.Write(b[last:i]) + + if c < utf8.RuneSelf { + // Quotes, slashes and angle brackets get quoted. + // Control characters get written as \u00XX. + switch c { + case '\\': + w.Write(jsBackslash) + case '\'': + w.Write(jsApos) + case '"': + w.Write(jsQuot) + case '<': + w.Write(jsLt) + case '>': + w.Write(jsGt) + default: + w.Write(jsLowUni) + t, b := c>>4, c&0x0f + w.Write(hex[t : t+1]) + w.Write(hex[b : b+1]) + } + } else { + // Unicode rune. + r, size := utf8.DecodeRune(b[i:]) + if unicode.IsPrint(r) { + w.Write(b[i : i+size]) + } else { + fmt.Fprintf(w, "\\u%04X", r) + } + i += size - 1 + } + last = i + 1 + } + w.Write(b[last:]) +} + +// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. +func JSEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexFunc(s, jsIsSpecial) < 0 { + return s + } + var b bytes.Buffer + JSEscape(&b, []byte(s)) + return b.String() +} + +func jsIsSpecial(r rune) bool { + switch r { + case '\\', '\'', '"', '<', '>': + return true + } + return r < ' ' || utf8.RuneSelf <= r +} + +// JSEscaper returns the escaped JavaScript equivalent of the textual +// representation of its arguments. +func JSEscaper(args ...interface{}) string { + return JSEscapeString(evalArgs(args)) +} + +// URLQueryEscaper returns the escaped value of the textual representation of +// its arguments in a form suitable for embedding in a URL query. +func URLQueryEscaper(args ...interface{}) string { + return url.QueryEscape(evalArgs(args)) +} + +// evalArgs formats the list of arguments into a string. It is therefore equivalent to +// fmt.Sprint(args...) +// except that each argument is indirected (if a pointer), as required, +// using the same rules as the default string evaluation during template +// execution. +func evalArgs(args []interface{}) string { + ok := false + var s string + // Fast path for simple common case. + if len(args) == 1 { + s, ok = args[0].(string) + } + if !ok { + for i, arg := range args { + a, ok := printableValue(reflect.ValueOf(arg)) + if ok { + args[i] = a + } // else left fmt do its thing + } + s = fmt.Sprint(args...) + } + return s +} diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go new file mode 100644 index 0000000..3636fb5 --- /dev/null +++ b/vendor/github.com/alecthomas/template/helper.go @@ -0,0 +1,108 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper functions to make constructing templates easier. 
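+// For example (editor's illustrative sketch; the file name is hypothetical),
+// Must below pairs naturally with the parse helpers in this file:
+//
+//	var t = template.Must(template.ParseFiles("layout.tmpl"))
+//
+// so a malformed template file fails loudly at program start-up.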
+ +package template + +import ( + "fmt" + "io/ioutil" + "path/filepath" +) + +// Functions and methods to parse templates. + +// Must is a helper that wraps a call to a function returning (*Template, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var t = template.Must(template.New("name").Parse("text")) +func Must(t *Template, err error) *Template { + if err != nil { + panic(err) + } + return t +} + +// ParseFiles creates a new Template and parses the template definitions from +// the named files. The returned template's name will have the (base) name and +// (parsed) contents of the first file. There must be at least one file. +// If an error occurs, parsing stops and the returned *Template is nil. +func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(t, filenames...) +} + +// parseFiles is the helper for the method and function. If the argument +// template is nil, it is created from the first file. +func parseFiles(t *Template, filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + s := string(b) + name := filepath.Base(filename) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. + var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// ParseGlob creates a new Template and parses the template definitions from the +// files identified by the pattern, which must match at least one file. The +// returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The pattern is +// processed by filepath.Glob and must match at least one file. ParseGlob is +// equivalent to calling t.ParseFiles with the list of files matched by the +// pattern. +func (t *Template) ParseGlob(pattern string) (*Template, error) { + return parseGlob(t, pattern) +} + +// parseGlob is the implementation of the function and method ParseGlob. 
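+// For instance (editor's note; the pattern is hypothetical),
+//
+//	t, err := template.ParseGlob("templates/*.tmpl")
+//
+// expands the glob and hands the matches to parseFiles, so a later file with
+// the same base name redefines the template parsed from an earlier one.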
+func parseGlob(t *Template, pattern string) (*Template, error) { + filenames, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if len(filenames) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + return parseFiles(t, filenames...) +} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go new file mode 100644 index 0000000..55f1c05 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/lex.go @@ -0,0 +1,556 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package parse + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos Pos // The starting position, in bytes, of this item in the input string. + val string // The value of this item. +} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case i.typ > itemKeyword: + return fmt.Sprintf("<%s>", i.val) + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemBool // boolean constant + itemChar // printable ASCII character; grab bag for comma etc. + itemCharConstant // character constant + itemComplex // complex constant (1+2i); imaginary is just a number + itemColonEquals // colon-equals (':=') introducing a declaration + itemEOF + itemField // alphanumeric identifier starting with '.' + itemIdentifier // alphanumeric identifier not starting with '.' + itemLeftDelim // left action delimiter + itemLeftParen // '(' inside action + itemNumber // simple number, including imaginary + itemPipe // pipe symbol + itemRawString // raw quoted string (includes quotes) + itemRightDelim // right action delimiter + itemElideNewline // elide newline after right delim + itemRightParen // ')' inside action + itemSpace // run of spaces separating arguments + itemString // quoted string (includes quotes) + itemText // plain text + itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' + // Keywords appear after all the rest. + itemKeyword // used only to delimit the keywords + itemDot // the cursor, spelled '.' + itemDefine // define keyword + itemElse // else keyword + itemEnd // end keyword + itemIf // if keyword + itemNil // the untyped nil constant, easiest to treat as a keyword + itemRange // range keyword + itemTemplate // template keyword + itemWith // with keyword +) + +var key = map[string]itemType{ + ".": itemDot, + "define": itemDefine, + "else": itemElse, + "end": itemEnd, + "if": itemIf, + "range": itemRange, + "nil": itemNil, + "template": itemTemplate, + "with": itemWith, +} + +const eof = -1 + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. 
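+// The lexer runs concurrently with the parser: lex (below) starts run in a
+// goroutine, and each stateFn returns the next stateFn, so control flow is
+// carried by ordinary function values instead of an explicit state variable.
+// A minimal sketch of the driving loop (editor's note, mirroring run):
+//
+//	for state := lexText; state != nil; {
+//		state = state(l)
+//	}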
+type lexer struct { + name string // the name of the input; used only for error reports + input string // the string being scanned + leftDelim string // start of action + rightDelim string // end of action + state stateFn // the next lexing function to enter + pos Pos // current position in the input + start Pos // start position of this item + width Pos // width of last rune read from input + lastPos Pos // position of most recent item returned by nextItem + items chan item // channel of scanned items + parenDepth int // nesting depth of ( ) exprs +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(name, input, left, right string) *lexer { + if left == "" { + left = leftDelim + } + if right == "" { + right = rightDelim + } + l := &lexer{ + name: name, + input: input, + leftDelim: left, + rightDelim: right, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexText; l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +const ( + leftDelim = "{{" + rightDelim = "}}" + leftComment = "/*" + rightComment = "*/" +) + +// lexText scans until an opening action delimiter, "{{". +func lexText(l *lexer) stateFn { + for { + if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { + if l.pos > l.start { + l.emit(itemText) + } + return lexLeftDelim + } + if l.next() == eof { + break + } + } + // Correctly reached EOF. + if l.pos > l.start { + l.emit(itemText) + } + l.emit(itemEOF) + return nil +} + +// lexLeftDelim scans the left delimiter, which is known to be present. 
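+// If the delimiter is immediately followed by /*, control passes to
+// lexComment instead, so an action such as {{/* note */}} emits no items
+// at all (editor's note).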
+func lexLeftDelim(l *lexer) stateFn { + l.pos += Pos(len(l.leftDelim)) + if strings.HasPrefix(l.input[l.pos:], leftComment) { + return lexComment + } + l.emit(itemLeftDelim) + l.parenDepth = 0 + return lexInsideAction +} + +// lexComment scans a comment. The left comment marker is known to be present. +func lexComment(l *lexer) stateFn { + l.pos += Pos(len(leftComment)) + i := strings.Index(l.input[l.pos:], rightComment) + if i < 0 { + return l.errorf("unclosed comment") + } + l.pos += Pos(i + len(rightComment)) + if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + return l.errorf("comment ends before closing delimiter") + + } + l.pos += Pos(len(l.rightDelim)) + l.ignore() + return lexText +} + +// lexRightDelim scans the right delimiter, which is known to be present. +func lexRightDelim(l *lexer) stateFn { + l.pos += Pos(len(l.rightDelim)) + l.emit(itemRightDelim) + if l.peek() == '\\' { + l.pos++ + l.emit(itemElideNewline) + } + return lexText +} + +// lexInsideAction scans the elements inside action delimiters. +func lexInsideAction(l *lexer) stateFn { + // Either number, quoted string, or identifier. + // Spaces separate arguments; runs of spaces turn into itemSpace. + // Pipe symbols separate and are emitted. + if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + if l.parenDepth == 0 { + return lexRightDelim + } + return l.errorf("unclosed left paren") + } + switch r := l.next(); { + case r == eof || isEndOfLine(r): + return l.errorf("unclosed action") + case isSpace(r): + return lexSpace + case r == ':': + if l.next() != '=' { + return l.errorf("expected :=") + } + l.emit(itemColonEquals) + case r == '|': + l.emit(itemPipe) + case r == '"': + return lexQuote + case r == '`': + return lexRawQuote + case r == '$': + return lexVariable + case r == '\'': + return lexChar + case r == '.': + // special look-ahead for ".field" so we don't break l.backup(). + if l.pos < Pos(len(l.input)) { + r := l.input[l.pos] + if r < '0' || '9' < r { + return lexField + } + } + fallthrough // '.' can start a number. + case r == '+' || r == '-' || ('0' <= r && r <= '9'): + l.backup() + return lexNumber + case isAlphaNumeric(r): + l.backup() + return lexIdentifier + case r == '(': + l.emit(itemLeftParen) + l.parenDepth++ + return lexInsideAction + case r == ')': + l.emit(itemRightParen) + l.parenDepth-- + if l.parenDepth < 0 { + return l.errorf("unexpected right paren %#U", r) + } + return lexInsideAction + case r <= unicode.MaxASCII && unicode.IsPrint(r): + l.emit(itemChar) + return lexInsideAction + default: + return l.errorf("unrecognized character in action: %#U", r) + } + return lexInsideAction +} + +// lexSpace scans a run of space characters. +// One space has already been seen. +func lexSpace(l *lexer) stateFn { + for isSpace(l.peek()) { + l.next() + } + l.emit(itemSpace) + return lexInsideAction +} + +// lexIdentifier scans an alphanumeric. +func lexIdentifier(l *lexer) stateFn { +Loop: + for { + switch r := l.next(); { + case isAlphaNumeric(r): + // absorb. + default: + l.backup() + word := l.input[l.start:l.pos] + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + switch { + case key[word] > itemKeyword: + l.emit(key[word]) + case word[0] == '.': + l.emit(itemField) + case word == "true", word == "false": + l.emit(itemBool) + default: + l.emit(itemIdentifier) + } + break Loop + } + } + return lexInsideAction +} + +// lexField scans a field: .Alphanumeric. +// The . has been scanned. 
+func lexField(l *lexer) stateFn { + return lexFieldOrVariable(l, itemField) +} + +// lexVariable scans a Variable: $Alphanumeric. +// The $ has been scanned. +func lexVariable(l *lexer) stateFn { + if l.atTerminator() { // Nothing interesting follows -> "$". + l.emit(itemVariable) + return lexInsideAction + } + return lexFieldOrVariable(l, itemVariable) +} + +// lexVariable scans a field or variable: [.$]Alphanumeric. +// The . or $ has been scanned. +func lexFieldOrVariable(l *lexer, typ itemType) stateFn { + if l.atTerminator() { // Nothing interesting follows -> "." or "$". + if typ == itemVariable { + l.emit(itemVariable) + } else { + l.emit(itemDot) + } + return lexInsideAction + } + var r rune + for { + r = l.next() + if !isAlphaNumeric(r) { + l.backup() + break + } + } + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + l.emit(typ) + return lexInsideAction +} + +// atTerminator reports whether the input is at valid termination character to +// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases +// like "$x+2" not being acceptable without a space, in case we decide one +// day to implement arithmetic. +func (l *lexer) atTerminator() bool { + r := l.peek() + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '|', ':', ')', '(': + return true + } + // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will + // succeed but should fail) but only in extremely rare cases caused by willfully + // bad choice of delimiter. + if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { + return true + } + return false +} + +// lexChar scans a character constant. The initial quote is already +// scanned. Syntax checking is done by the parser. +func lexChar(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated character constant") + case '\'': + break Loop + } + } + l.emit(itemCharConstant) + return lexInsideAction +} + +// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This +// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" +// and "089" - but when it's wrong the input is invalid and the parser (via +// strconv) will notice. +func lexNumber(l *lexer) stateFn { + if !l.scanNumber() { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + if sign := l.peek(); sign == '+' || sign == '-' { + // Complex: 1+2i. No spaces, must end in 'i'. + if !l.scanNumber() || l.input[l.pos-1] != 'i' { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(itemComplex) + } else { + l.emit(itemNumber) + } + return lexInsideAction +} + +func (l *lexer) scanNumber() bool { + // Optional leading sign. + l.accept("+-") + // Is it hex? + digits := "0123456789" + if l.accept("0") && l.accept("xX") { + digits = "0123456789abcdefABCDEF" + } + l.acceptRun(digits) + if l.accept(".") { + l.acceptRun(digits) + } + if l.accept("eE") { + l.accept("+-") + l.acceptRun("0123456789") + } + // Is it imaginary? + l.accept("i") + // Next thing mustn't be alphanumeric. + if isAlphaNumeric(l.peek()) { + l.next() + return false + } + return true +} + +// lexQuote scans a quoted string. 
+func lexQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated quoted string") + case '"': + break Loop + } + } + l.emit(itemString) + return lexInsideAction +} + +// lexRawQuote scans a raw quoted string. +func lexRawQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case eof, '\n': + return l.errorf("unterminated raw quoted string") + case '`': + break Loop + } + } + l.emit(itemRawString) + return lexInsideAction +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go new file mode 100644 index 0000000..55c37f6 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/node.go @@ -0,0 +1,834 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse nodes. + +package parse + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +var textFormat = "%s" // Changed to "%q" in tests for better error messages. + +// A Node is an element in the parse tree. The interface is trivial. +// The interface contains an unexported method so that only +// types local to this package can satisfy it. +type Node interface { + Type() NodeType + String() string + // Copy does a deep copy of the Node and all its components. + // To avoid type assertions, some XxxNodes also have specialized + // CopyXxx methods that return *XxxNode. + Copy() Node + Position() Pos // byte position of start of node in full original input string + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree +} + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Pos represents a byte position in the original input text from which +// this template was parsed. +type Pos int + +func (p Pos) Position() Pos { + return p +} + +// Type returns itself and provides an easy default implementation +// for embedding in a Node. Embedded in all non-trivial Nodes. +func (t NodeType) Type() NodeType { + return t +} + +const ( + NodeText NodeType = iota // Plain text. + NodeAction // A non-control action such as a field evaluation. + NodeBool // A boolean constant. + NodeChain // A sequence of field accesses. + NodeCommand // An element of a pipeline. + NodeDot // The cursor, dot. + nodeElse // An else action. Not added to tree. + nodeEnd // An end action. Not added to tree. + NodeField // A field or method name. + NodeIdentifier // An identifier; always a function name. + NodeIf // An if action. + NodeList // A list of Nodes. + NodeNil // An untyped nil constant. + NodeNumber // A numerical constant. + NodePipe // A pipeline of commands. + NodeRange // A range action. + NodeString // A string constant. + NodeTemplate // A template invocation action. + NodeVariable // A $ variable. + NodeWith // A with action. +) + +// Nodes. + +// ListNode holds a sequence of nodes. 
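+// Editor's note (illustrative): the node types in this file mirror template
+// syntax directly; for example the input
+//
+//	{{if .Ready}}ok{{end}}
+//
+// parses to a ListNode holding a single IfNode whose Pipe evaluates .Ready
+// and whose List contains the TextNode "ok".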
+type ListNode struct { + NodeType + Pos + tr *Tree + Nodes []Node // The element nodes in lexical order. +} + +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) tree() *Tree { + return l.tr +} + +func (l *ListNode) String() string { + b := new(bytes.Buffer) + for _, n := range l.Nodes { + fmt.Fprint(b, n) + } + return b.String() +} + +func (l *ListNode) CopyList() *ListNode { + if l == nil { + return l + } + n := l.tr.newList(l.Pos) + for _, elem := range l.Nodes { + n.append(elem.Copy()) + } + return n +} + +func (l *ListNode) Copy() Node { + return l.CopyList() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Pos + tr *Tree + Text []byte // The text; may span newlines. +} + +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf(textFormat, t.Text) +} + +func (t *TextNode) tree() *Tree { + return t.tr +} + +func (t *TextNode) Copy() Node { + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} +} + +// PipeNode holds a pipeline with optional declaration +type PipeNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Decl []*VariableNode // Variable declarations in lexical order. + Cmds []*CommandNode // The commands in lexical order. +} + +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +} + +func (p *PipeNode) append(command *CommandNode) { + p.Cmds = append(p.Cmds, command) +} + +func (p *PipeNode) String() string { + s := "" + if len(p.Decl) > 0 { + for i, v := range p.Decl { + if i > 0 { + s += ", " + } + s += v.String() + } + s += " := " + } + for i, c := range p.Cmds { + if i > 0 { + s += " | " + } + s += c.String() + } + return s +} + +func (p *PipeNode) tree() *Tree { + return p.tr +} + +func (p *PipeNode) CopyPipe() *PipeNode { + if p == nil { + return p + } + var decl []*VariableNode + for _, d := range p.Decl { + decl = append(decl, d.Copy().(*VariableNode)) + } + n := p.tr.newPipeline(p.Pos, p.Line, decl) + for _, c := range p.Cmds { + n.append(c.Copy().(*CommandNode)) + } + return n +} + +func (p *PipeNode) Copy() Node { + return p.CopyPipe() +} + +// ActionNode holds an action (something bounded by delimiters). +// Control actions have their own nodes; ActionNode represents simple +// ones such as field evaluations and parenthesized pipelines. +type ActionNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline in the action. +} + +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +} + +func (a *ActionNode) String() string { + return fmt.Sprintf("{{%s}}", a.Pipe) + +} + +func (a *ActionNode) tree() *Tree { + return a.tr +} + +func (a *ActionNode) Copy() Node { + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + +} + +// CommandNode holds a command (a pipeline inside an evaluating action). +type CommandNode struct { + NodeType + Pos + tr *Tree + Args []Node // Arguments in lexical order: Identifier, field, or constant. 
+} + +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} +} + +func (c *CommandNode) append(arg Node) { + c.Args = append(c.Args, arg) +} + +func (c *CommandNode) String() string { + s := "" + for i, arg := range c.Args { + if i > 0 { + s += " " + } + if arg, ok := arg.(*PipeNode); ok { + s += "(" + arg.String() + ")" + continue + } + s += arg.String() + } + return s +} + +func (c *CommandNode) tree() *Tree { + return c.tr +} + +func (c *CommandNode) Copy() Node { + if c == nil { + return c + } + n := c.tr.newCommand(c.Pos) + for _, c := range c.Args { + n.append(c.Copy()) + } + return n +} + +// IdentifierNode holds an identifier. +type IdentifierNode struct { + NodeType + Pos + tr *Tree + Ident string // The identifier's name. +} + +// NewIdentifier returns a new IdentifierNode with the given identifier name. +func NewIdentifier(ident string) *IdentifierNode { + return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} +} + +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { + i.Pos = pos + return i +} + +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + +func (i *IdentifierNode) String() string { + return i.Ident +} + +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + +func (i *IdentifierNode) Copy() Node { + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) +} + +// VariableNode holds a list of variable names, possibly with chained field +// accesses. The dollar sign is part of the (first) name. +type VariableNode struct { + NodeType + Pos + tr *Tree + Ident []string // Variable name and fields in lexical order. +} + +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +} + +func (v *VariableNode) String() string { + s := "" + for i, id := range v.Ident { + if i > 0 { + s += "." + } + s += id + } + return s +} + +func (v *VariableNode) tree() *Tree { + return v.tr +} + +func (v *VariableNode) Copy() Node { + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} +} + +// DotNode holds the special identifier '.'. +type DotNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} +} + +func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeDot +} + +func (d *DotNode) String() string { + return "." +} + +func (d *DotNode) tree() *Tree { + return d.tr +} + +func (d *DotNode) Copy() Node { + return d.tr.newDot(d.Pos) +} + +// NilNode holds the special identifier 'nil' representing an untyped nil constant. +type NilNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} +} + +func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. 
+ return NodeNil +} + +func (n *NilNode) String() string { + return "nil" +} + +func (n *NilNode) tree() *Tree { + return n.tr +} + +func (n *NilNode) Copy() Node { + return n.tr.newNil(n.Pos) +} + +// FieldNode holds a field (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The period is dropped from each ident. +type FieldNode struct { + NodeType + Pos + tr *Tree + Ident []string // The identifiers in lexical order. +} + +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +} + +func (f *FieldNode) String() string { + s := "" + for _, id := range f.Ident { + s += "." + id + } + return s +} + +func (f *FieldNode) tree() *Tree { + return f.tr +} + +func (f *FieldNode) Copy() Node { + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} +} + +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The periods are dropped from each ident. +type ChainNode struct { + NodeType + Pos + tr *Tree + Node Node + Field []string // The identifiers in lexical order. +} + +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} +} + +// Add adds the named field (which should start with a period) to the end of the chain. +func (c *ChainNode) Add(field string) { + if len(field) == 0 || field[0] != '.' { + panic("no dot in field") + } + field = field[1:] // Remove leading dot. + if field == "" { + panic("empty field") + } + c.Field = append(c.Field, field) +} + +func (c *ChainNode) String() string { + s := c.Node.String() + if _, ok := c.Node.(*PipeNode); ok { + s = "(" + s + ")" + } + for _, field := range c.Field { + s += "." + field + } + return s +} + +func (c *ChainNode) tree() *Tree { + return c.tr +} + +func (c *ChainNode) Copy() Node { + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} +} + +// BoolNode holds a boolean constant. +type BoolNode struct { + NodeType + Pos + tr *Tree + True bool // The value of the boolean constant. +} + +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} +} + +func (b *BoolNode) String() string { + if b.True { + return "true" + } + return "false" +} + +func (b *BoolNode) tree() *Tree { + return b.tr +} + +func (b *BoolNode) Copy() Node { + return b.tr.newBool(b.Pos, b.True) +} + +// NumberNode holds a number: signed or unsigned integer, float, or complex. +// The value is parsed and stored under all the types that can represent the value. +// This simulates in a small amount of code the behavior of Go's ideal constants. +type NumberNode struct { + NodeType + Pos + tr *Tree + IsInt bool // Number has an integral value. + IsUint bool // Number has an unsigned integral value. + IsFloat bool // Number has a floating-point value. + IsComplex bool // Number is complex. + Int64 int64 // The signed integer value. + Uint64 uint64 // The unsigned integer value. + Float64 float64 // The floating-point value. + Complex128 complex128 // The complex value. + Text string // The original textual representation from the input. 
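+	// Editor's note (illustrative): the literal "1" sets IsInt, IsUint, and
+	// IsFloat all at once, mimicking Go's untyped constants, while "-1"
+	// leaves IsUint false and "1.5" leaves only IsFloat set.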
+} + +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} + switch typ { + case itemCharConstant: + rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) + if err != nil { + return nil, err + } + if tail != "'" { + return nil, fmt.Errorf("malformed character constant: %s", text) + } + n.Int64 = int64(rune) + n.IsInt = true + n.Uint64 = uint64(rune) + n.IsUint = true + n.Float64 = float64(rune) // odd but those are the rules. + n.IsFloat = true + return n, nil + case itemComplex: + // fmt.Sscan can parse the pair, so let it do the work. + if _, err := fmt.Sscan(text, &n.Complex128); err != nil { + return nil, err + } + n.IsComplex = true + n.simplifyComplex() + return n, nil + } + // Imaginary constants can only be complex unless they are zero. + if len(text) > 0 && text[len(text)-1] == 'i' { + f, err := strconv.ParseFloat(text[:len(text)-1], 64) + if err == nil { + n.IsComplex = true + n.Complex128 = complex(0, f) + n.simplifyComplex() + return n, nil + } + } + // Do integer test first so we get 0x123 etc. + u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. + if err == nil { + n.IsUint = true + n.Uint64 = u + } + i, err := strconv.ParseInt(text, 0, 64) + if err == nil { + n.IsInt = true + n.Int64 = i + if i == 0 { + n.IsUint = true // in case of -0. + n.Uint64 = u + } + } + // If an integer extraction succeeded, promote the float. + if n.IsInt { + n.IsFloat = true + n.Float64 = float64(n.Int64) + } else if n.IsUint { + n.IsFloat = true + n.Float64 = float64(n.Uint64) + } else { + f, err := strconv.ParseFloat(text, 64) + if err == nil { + n.IsFloat = true + n.Float64 = f + // If a floating-point extraction succeeded, extract the int if needed. + if !n.IsInt && float64(int64(f)) == f { + n.IsInt = true + n.Int64 = int64(f) + } + if !n.IsUint && float64(uint64(f)) == f { + n.IsUint = true + n.Uint64 = uint64(f) + } + } + } + if !n.IsInt && !n.IsUint && !n.IsFloat { + return nil, fmt.Errorf("illegal number syntax: %q", text) + } + return n, nil +} + +// simplifyComplex pulls out any other types that are represented by the complex number. +// These all require that the imaginary part be zero. +func (n *NumberNode) simplifyComplex() { + n.IsFloat = imag(n.Complex128) == 0 + if n.IsFloat { + n.Float64 = real(n.Complex128) + n.IsInt = float64(int64(n.Float64)) == n.Float64 + if n.IsInt { + n.Int64 = int64(n.Float64) + } + n.IsUint = float64(uint64(n.Float64)) == n.Float64 + if n.IsUint { + n.Uint64 = uint64(n.Float64) + } + } +} + +func (n *NumberNode) String() string { + return n.Text +} + +func (n *NumberNode) tree() *Tree { + return n.tr +} + +func (n *NumberNode) Copy() Node { + nn := new(NumberNode) + *nn = *n // Easy, fast, correct. + return nn +} + +// StringNode holds a string constant. The value has been "unquoted". +type StringNode struct { + NodeType + Pos + tr *Tree + Quoted string // The original text of the string, with quotes. + Text string // The string, after quote processing. +} + +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +} + +func (s *StringNode) String() string { + return s.Quoted +} + +func (s *StringNode) tree() *Tree { + return s.tr +} + +func (s *StringNode) Copy() Node { + return s.tr.newString(s.Pos, s.Quoted, s.Text) +} + +// endNode represents an {{end}} action. +// It does not appear in the final parse tree. 
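+// The parser consumes endNode (and elseNode below) while assembling a
+// BranchNode, which is why neither type is exported (editor's note).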
+type endNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} +} + +func (e *endNode) String() string { + return "{{end}}" +} + +func (e *endNode) tree() *Tree { + return e.tr +} + +func (e *endNode) Copy() Node { + return e.tr.newEnd(e.Pos) +} + +// elseNode represents an {{else}} action. Does not appear in the final tree. +type elseNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) +} + +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} +} + +func (e *elseNode) Type() NodeType { + return nodeElse +} + +func (e *elseNode) String() string { + return "{{else}}" +} + +func (e *elseNode) tree() *Tree { + return e.tr +} + +func (e *elseNode) Copy() Node { + return e.tr.newElse(e.Pos, e.Line) +} + +// BranchNode is the common representation of if, range, and with. +type BranchNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline to be evaluated. + List *ListNode // What to execute if the value is non-empty. + ElseList *ListNode // What to execute if the value is empty (nil if absent). +} + +func (b *BranchNode) String() string { + name := "" + switch b.NodeType { + case NodeIf: + name = "if" + case NodeRange: + name = "range" + case NodeWith: + name = "with" + default: + panic("unknown branch type") + } + if b.ElseList != nil { + return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) + } + return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) +} + +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + +// IfNode represents an {{if}} action and its commands. +type IfNode struct { + BranchNode +} + +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (i *IfNode) Copy() Node { + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) +} + +// RangeNode represents a {{range}} action and its commands. +type RangeNode struct { + BranchNode +} + +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (r *RangeNode) Copy() Node { + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) +} + +// WithNode represents a {{with}} action and its commands. 
+type WithNode struct { + BranchNode +} + +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (w *WithNode) Copy() Node { + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) +} + +// TemplateNode represents a {{template}} action. +type TemplateNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Name string // The name of the template (unquoted). + Pipe *PipeNode // The command to evaluate as dot for the template. +} + +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} +} + +func (t *TemplateNode) String() string { + if t.Pipe == nil { + return fmt.Sprintf("{{template %q}}", t.Name) + } + return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) +} + +func (t *TemplateNode) tree() *Tree { + return t.tr +} + +func (t *TemplateNode) Copy() Node { + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) +} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go new file mode 100644 index 0000000..0d77ade --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/parse.go @@ -0,0 +1,700 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package parse builds parse trees for templates as defined by text/template +// and html/template. Clients should use those packages to construct templates +// rather than this one, which provides shared internal data structures not +// intended for general use. +package parse + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "strings" +) + +// Tree is the representation of a single parsed template. +type Tree struct { + Name string // name of the template represented by the tree. + ParseName string // name of the top-level template during parsing, for error messages. + Root *ListNode // top-level root of the tree. + text string // text parsed to create the template (or its parent) + // Parsing only; cleared after parse. + funcs []map[string]interface{} + lex *lexer + token [3]item // three-token lookahead for parser. + peekCount int + vars []string // variables defined at the moment. +} + +// Copy returns a copy of the Tree. Any parsing state is discarded. +func (t *Tree) Copy() *Tree { + if t == nil { + return nil + } + return &Tree{ + Name: t.Name, + ParseName: t.ParseName, + Root: t.Root.CopyList(), + text: t.text, + } +} + +// Parse returns a map from template name to parse.Tree, created by parsing the +// templates described in the argument string. The top-level template will be +// given the specified name. If an error is encountered, parsing stops and an +// empty map is returned with the error. +func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { + treeSet = make(map[string]*Tree) + t := New(name) + t.text = text + _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) + return +} + +// next returns the next token. 
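+// Editor's note: token plus peekCount implement the three-token lookahead
+// that backup, backup2, and backup3 rely on; pipeline (in parse.go) needs
+// the full depth, reading "$x", a space, and the following word before
+// deciding whether to push all three back.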
+func (t *Tree) next() item { + if t.peekCount > 0 { + t.peekCount-- + } else { + t.token[0] = t.lex.nextItem() + } + return t.token[t.peekCount] +} + +// backup backs the input stream up one token. +func (t *Tree) backup() { + t.peekCount++ +} + +// backup2 backs the input stream up two tokens. +// The zeroth token is already there. +func (t *Tree) backup2(t1 item) { + t.token[1] = t1 + t.peekCount = 2 +} + +// backup3 backs the input stream up three tokens +// The zeroth token is already there. +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. + t.token[1] = t1 + t.token[2] = t2 + t.peekCount = 3 +} + +// peek returns but does not consume the next token. +func (t *Tree) peek() item { + if t.peekCount > 0 { + return t.token[t.peekCount-1] + } + t.peekCount = 1 + t.token[0] = t.lex.nextItem() + return t.token[0] +} + +// nextNonSpace returns the next non-space token. +func (t *Tree) nextNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + return token +} + +// peekNonSpace returns but does not consume the next non-space token. +func (t *Tree) peekNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + t.backup() + return token +} + +// Parsing. + +// New allocates a new parse tree with the given name. +func New(name string, funcs ...map[string]interface{}) *Tree { + return &Tree{ + Name: name, + funcs: funcs, + } +} + +// ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. +func (t *Tree) ErrorContext(n Node) (location, context string) { + pos := int(n.Position()) + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] + byteNum := strings.LastIndex(text, "\n") + if byteNum == -1 { + byteNum = pos // On first line. + } else { + byteNum++ // After the newline. + byteNum = pos - byteNum + } + lineNum := 1 + strings.Count(text, "\n") + context = n.String() + if len(context) > 20 { + context = fmt.Sprintf("%.20s...", context) + } + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context +} + +// errorf formats the error and terminates processing. +func (t *Tree) errorf(format string, args ...interface{}) { + t.Root = nil + format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (t *Tree) error(err error) { + t.errorf("%s", err) +} + +// expect consumes the next token and guarantees it has the required type. +func (t *Tree) expect(expected itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected { + t.unexpected(token, context) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected1 && token.typ != expected2 { + t.unexpected(token, context) + } + return token +} + +// unexpected complains about the token and terminates processing. +func (t *Tree) unexpected(token item, context string) { + t.errorf("unexpected %s in %s", token, context) +} + +// recover is the handler that turns panics into returns from the top level of Parse. 
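+// Syntax errors are raised by errorf as panics carrying an ordinary error,
+// which this handler converts back into Parse's error return; genuine
+// runtime.Errors are re-panicked so real bugs are not swallowed. A minimal
+// sketch of the pattern (editor's note, names as in this file):
+//
+//	defer t.recover(&err)              // in Parse
+//	panic(fmt.Errorf(format, args...)) // in errorf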
+func (t *Tree) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + if t != nil { + t.stopParse() + } + *errp = e.(error) + } + return +} + +// startParse initializes the parser, using the lexer. +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { + t.Root = nil + t.lex = lex + t.vars = []string{"$"} + t.funcs = funcs +} + +// stopParse terminates parsing. +func (t *Tree) stopParse() { + t.lex = nil + t.vars = nil + t.funcs = nil +} + +// Parse parses the template definition string to construct a representation of +// the template for execution. If either action delimiter string is empty, the +// default ("{{" or "}}") is used. Embedded template definitions are added to +// the treeSet map. +func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { + defer t.recover(&err) + t.ParseName = t.Name + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) + t.text = text + t.parse(treeSet) + t.add(treeSet) + t.stopParse() + return t, nil +} + +// add adds tree to the treeSet. +func (t *Tree) add(treeSet map[string]*Tree) { + tree := treeSet[t.Name] + if tree == nil || IsEmptyTree(tree.Root) { + treeSet[t.Name] = t + return + } + if !IsEmptyTree(t.Root) { + t.errorf("template: multiple definition of template %q", t.Name) + } +} + +// IsEmptyTree reports whether this tree (node) is empty of everything but space. +func IsEmptyTree(n Node) bool { + switch n := n.(type) { + case nil: + return true + case *ActionNode: + case *IfNode: + case *ListNode: + for _, node := range n.Nodes { + if !IsEmptyTree(node) { + return false + } + } + return true + case *RangeNode: + case *TemplateNode: + case *TextNode: + return len(bytes.TrimSpace(n.Text)) == 0 + case *WithNode: + default: + panic("unknown node: " + n.String()) + } + return false +} + +// parse is the top-level parser for a template, essentially the same +// as itemList except it also parses {{define}} actions. +// It runs to EOF. +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { + t.Root = t.newList(t.peek().pos) + for t.peek().typ != itemEOF { + if t.peek().typ == itemLeftDelim { + delim := t.next() + if t.nextNonSpace().typ == itemDefine { + newT := New("definition") // name will be updated once we know it. + newT.text = t.text + newT.ParseName = t.ParseName + newT.startParse(t.funcs, t.lex) + newT.parseDefinition(treeSet) + continue + } + t.backup2(delim) + } + n := t.textOrAction() + if n.Type() == nodeEnd { + t.errorf("unexpected %s", n) + } + t.Root.append(n) + } + return nil +} + +// parseDefinition parses a {{define}} ... {{end}} template definition and +// installs the definition in the treeSet map. The "define" keyword has already +// been scanned. +func (t *Tree) parseDefinition(treeSet map[string]*Tree) { + const context = "define clause" + name := t.expectOneOf(itemString, itemRawString, context) + var err error + t.Name, err = strconv.Unquote(name.val) + if err != nil { + t.error(err) + } + t.expect(itemRightDelim, context) + var end Node + t.Root, end = t.itemList() + if end.Type() != nodeEnd { + t.errorf("unexpected %s in %s", end, context) + } + t.add(treeSet) + t.stopParse() +} + +// itemList: +// textOrAction* +// Terminates at {{end}} or {{else}}, returned separately. 
+func (t *Tree) itemList() (list *ListNode, next Node) { + list = t.newList(t.peekNonSpace().pos) + for t.peekNonSpace().typ != itemEOF { + n := t.textOrAction() + switch n.Type() { + case nodeEnd, nodeElse: + return list, n + } + list.append(n) + } + t.errorf("unexpected EOF") + return +} + +// textOrAction: +// text | action +func (t *Tree) textOrAction() Node { + switch token := t.nextNonSpace(); token.typ { + case itemElideNewline: + return t.elideNewline() + case itemText: + return t.newText(token.pos, token.val) + case itemLeftDelim: + return t.action() + default: + t.unexpected(token, "input") + } + return nil +} + +// elideNewline: +// Remove newlines trailing rightDelim if \\ is present. +func (t *Tree) elideNewline() Node { + token := t.peek() + if token.typ != itemText { + t.unexpected(token, "input") + return nil + } + + t.next() + stripped := strings.TrimLeft(token.val, "\n\r") + diff := len(token.val) - len(stripped) + if diff > 0 { + // This is a bit nasty. We mutate the token in-place to remove + // preceding newlines. + token.pos += Pos(diff) + token.val = stripped + } + return t.newText(token.pos, token.val) +} + +// Action: +// control +// command ("|" command)* +// Left delim is past. Now get actions. +// First word could be a keyword such as range. +func (t *Tree) action() (n Node) { + switch token := t.nextNonSpace(); token.typ { + case itemElse: + return t.elseControl() + case itemEnd: + return t.endControl() + case itemIf: + return t.ifControl() + case itemRange: + return t.rangeControl() + case itemTemplate: + return t.templateControl() + case itemWith: + return t.withControl() + } + t.backup() + // Do not pop variables; they persist until "end". + return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) +} + +// Pipeline: +// declarations? command ('|' command)* +func (t *Tree) pipeline(context string) (pipe *PipeNode) { + var decl []*VariableNode + pos := t.peekNonSpace().pos + // Are there declarations? + for { + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. 
+			tokenAfterVariable := t.peek()
+			if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
+				t.nextNonSpace()
+				variable := t.newVariable(v.pos, v.val)
+				decl = append(decl, variable)
+				t.vars = append(t.vars, v.val)
+				if next.typ == itemChar && next.val == "," {
+					if context == "range" && len(decl) < 2 {
+						continue
+					}
+					t.errorf("too many declarations in %s", context)
+				}
+			} else if tokenAfterVariable.typ == itemSpace {
+				t.backup3(v, tokenAfterVariable)
+			} else {
+				t.backup2(v)
+			}
+		}
+		break
+	}
+	pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
+	for {
+		switch token := t.nextNonSpace(); token.typ {
+		case itemRightDelim, itemRightParen:
+			if len(pipe.Cmds) == 0 {
+				t.errorf("missing value for %s", context)
+			}
+			if token.typ == itemRightParen {
+				t.backup()
+			}
+			return
+		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+			t.backup()
+			pipe.append(t.command())
+		default:
+			t.unexpected(token, context)
+		}
+	}
+}
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+	defer t.popVars(len(t.vars))
+	line = t.lex.lineNumber()
+	pipe = t.pipeline(context)
+	var next Node
+	list, next = t.itemList()
+	switch next.Type() {
+	case nodeEnd: // done
+	case nodeElse:
+		if allowElseIf {
+			// Special case for "else if". If the "else" is followed immediately by an "if",
+			// the elseControl will have left the "if" token pending. Treat
+			//	{{if a}}_{{else if b}}_{{end}}
+			// as
+			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+			// is assumed. This technique works even for long if-else-if chains.
+			// TODO: Should we allow else-if in with and range?
+			if t.peek().typ == itemIf {
+				t.next() // Consume the "if" token.
+				elseList = t.newList(next.Position())
+				elseList.append(t.ifControl())
+				// Do not consume the next item - only one {{end}} required.
+				break
+			}
+		}
+		elseList, next = t.itemList()
+		if next.Type() != nodeEnd {
+			t.errorf("expected end; found %s", next)
+		}
+	}
+	return pipe.Position(), line, pipe, list, elseList
+}
+
+// If:
+//	{{if pipeline}} itemList {{end}}
+//	{{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+	return t.newIf(t.parseControl(true, "if"))
+}
+
+// Range:
+//	{{range pipeline}} itemList {{end}}
+//	{{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+	return t.newRange(t.parseControl(false, "range"))
+}
+
+// With:
+//	{{with pipeline}} itemList {{end}}
+//	{{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+	return t.newWith(t.parseControl(false, "with"))
+}
+
+// End:
+//	{{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+	return t.newEnd(t.expect(itemRightDelim, "end").pos)
+}
+
+// Else:
+//	{{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+	// Special case for "else if".
+	peek := t.peekNonSpace()
+	if peek.typ == itemIf {
+		// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
+		return t.newElse(peek.pos, t.lex.lineNumber())
+	}
+	return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
+}
+
+// Template:
+//	{{template stringValue pipeline}}
+// Template keyword is past.
The name must be something that can evaluate +// to a string. +func (t *Tree) templateControl() Node { + var name string + token := t.nextNonSpace() + switch token.typ { + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + name = s + default: + t.unexpected(token, "template invocation") + } + var pipe *PipeNode + if t.nextNonSpace().typ != itemRightDelim { + t.backup() + // Do not pop variables; they persist until "end". + pipe = t.pipeline("template") + } + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) +} + +// command: +// operand (space operand)* +// space-separated arguments up to a pipeline character or right delimiter. +// we consume the pipe character but leave the right delim to terminate the action. +func (t *Tree) command() *CommandNode { + cmd := t.newCommand(t.peekNonSpace().pos) + for { + t.peekNonSpace() // skip leading spaces. + operand := t.operand() + if operand != nil { + cmd.append(operand) + } + switch token := t.next(); token.typ { + case itemSpace: + continue + case itemError: + t.errorf("%s", token.val) + case itemRightDelim, itemRightParen: + t.backup() + case itemPipe: + default: + t.errorf("unexpected %s in operand; missing space?", token) + } + break + } + if len(cmd.Args) == 0 { + t.errorf("empty command") + } + return cmd +} + +// operand: +// term .Field* +// An operand is a space-separated component of a command, +// a term possibly followed by field accesses. +// A nil return means the next item is not an operand. +func (t *Tree) operand() Node { + node := t.term() + if node == nil { + return nil + } + if t.peek().typ == itemField { + chain := t.newChain(t.peek().pos, node) + for t.peek().typ == itemField { + chain.Add(t.next().val) + } + // Compatibility with original API: If the term is of type NodeField + // or NodeVariable, just put more fields on the original. + // Otherwise, keep the Chain node. + // TODO: Switch to Chains always when we can. + switch node.Type() { + case NodeField: + node = t.newField(chain.Position(), chain.String()) + case NodeVariable: + node = t.newVariable(chain.Position(), chain.String()) + default: + node = chain + } + } + return node +} + +// term: +// literal (number, string, nil, boolean) +// function (identifier) +// . +// .Field +// $ +// '(' pipeline ')' +// A term is a simple "expression". +// A nil return means the next item is not a term. 
+func (t *Tree) term() Node { + switch token := t.nextNonSpace(); token.typ { + case itemError: + t.errorf("%s", token.val) + case itemIdentifier: + if !t.hasFunction(token.val) { + t.errorf("function %q not defined", token.val) + } + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) + case itemDot: + return t.newDot(token.pos) + case itemNil: + return t.newNil(token.pos) + case itemVariable: + return t.useVar(token.pos, token.val) + case itemField: + return t.newField(token.pos, token.val) + case itemBool: + return t.newBool(token.pos, token.val == "true") + case itemCharConstant, itemComplex, itemNumber: + number, err := t.newNumber(token.pos, token.val, token.typ) + if err != nil { + t.error(err) + } + return number + case itemLeftParen: + pipe := t.pipeline("parenthesized pipeline") + if token := t.next(); token.typ != itemRightParen { + t.errorf("unclosed right paren: unexpected %s", token) + } + return pipe + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + return t.newString(token.pos, token.val, s) + } + t.backup() + return nil +} + +// hasFunction reports if a function name exists in the Tree's maps. +func (t *Tree) hasFunction(name string) bool { + for _, funcMap := range t.funcs { + if funcMap == nil { + continue + } + if funcMap[name] != nil { + return true + } + } + return false +} + +// popVars trims the variable list to the specified length +func (t *Tree) popVars(n int) { + t.vars = t.vars[:n] +} + +// useVar returns a node for a variable reference. It errors if the +// variable is not defined. +func (t *Tree) useVar(pos Pos, name string) Node { + v := t.newVariable(pos, name) + for _, varName := range t.vars { + if varName == v.Ident[0] { + return v + } + } + t.errorf("undefined variable %q", v.Ident[0]) + return nil +} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go new file mode 100644 index 0000000..447ed2a --- /dev/null +++ b/vendor/github.com/alecthomas/template/template.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "fmt" + "reflect" + + "github.com/alecthomas/template/parse" +) + +// common holds the information shared by related templates. +type common struct { + tmpl map[string]*Template + // We use two maps, one for parsing and one for execution. + // This separation makes the API cleaner since it doesn't + // expose reflection to the client. + parseFuncs FuncMap + execFuncs map[string]reflect.Value +} + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + name string + *parse.Tree + *common + leftDelim string + rightDelim string +} + +// New allocates a new template with the given name. +func New(name string) *Template { + return &Template{ + name: name, + } +} + +// Name returns the name of the template. +func (t *Template) Name() string { + return t.name +} + +// New allocates a new template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. 
+func (t *Template) New(name string) *Template { + t.init() + return &Template{ + name: name, + common: t.common, + leftDelim: t.leftDelim, + rightDelim: t.rightDelim, + } +} + +func (t *Template) init() { + if t.common == nil { + t.common = new(common) + t.tmpl = make(map[string]*Template) + t.parseFuncs = make(FuncMap) + t.execFuncs = make(map[string]reflect.Value) + } +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt := t.copy(nil) + nt.init() + nt.tmpl[t.name] = nt + for k, v := range t.tmpl { + if k == t.name { // Already installed. + continue + } + // The associated templates share nt's common structure. + tmpl := v.copy(nt.common) + nt.tmpl[k] = tmpl + } + for k, v := range t.parseFuncs { + nt.parseFuncs[k] = v + } + for k, v := range t.execFuncs { + nt.execFuncs[k] = v + } + return nt, nil +} + +// copy returns a shallow copy of t, with common set to the argument. +func (t *Template) copy(c *common) *Template { + nt := New(t.name) + nt.Tree = t.Tree + nt.common = c + nt.leftDelim = t.leftDelim + nt.rightDelim = t.rightDelim + return nt +} + +// AddParseTree creates a new template with the name and parse tree +// and associates it with t. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + if t.common != nil && t.tmpl[name] != nil { + return nil, fmt.Errorf("template: redefinition of template %q", name) + } + nt := t.New(name) + nt.Tree = tree + t.tmpl[name] = nt + return nt, nil +} + +// Templates returns a slice of the templates associated with t, including t +// itself. +func (t *Template) Templates() []*Template { + if t.common == nil { + return nil + } + // Return a slice so we don't expose the map. + m := make([]*Template, 0, len(t.tmpl)) + for _, v := range t.tmpl { + m = append(m, v) + } + return m +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.leftDelim = left + t.rightDelim = right + return t +} + +// Funcs adds the elements of the argument map to the template's function map. +// It panics if a value in the map is not a function with appropriate return +// type. However, it is legal to overwrite elements of the map. The return +// value is the template, so calls can be chained. +func (t *Template) Funcs(funcMap FuncMap) *Template { + t.init() + addValueFuncs(t.execFuncs, funcMap) + addFuncs(t.parseFuncs, funcMap) + return t +} + +// Lookup returns the template with the given name that is associated with t, +// or nil if there is no such template. +func (t *Template) Lookup(name string) *Template { + if t.common == nil { + return nil + } + return t.tmpl[name] +} + +// Parse parses a string into a template. Nested template definitions will be +// associated with the top-level template t. 
Parse may be called multiple times
+// to parse definitions of templates to associate with t. It is an error if a
+// resulting template is non-empty (contains content other than template
+// definitions) and would replace a non-empty template with the same name.
+// (In multiple calls to Parse with the same receiver template, only one call
+// can contain text other than space, comments, and template definitions.)
+func (t *Template) Parse(text string) (*Template, error) {
+	t.init()
+	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+	if err != nil {
+		return nil, err
+	}
+	// Add the newly parsed trees, including the one for t, into our common structure.
+	for name, tree := range trees {
+		// If the name we parsed is the name of this template, overwrite this template.
+		// The associate method checks it's not a redefinition.
+		tmpl := t
+		if name != t.name {
+			tmpl = t.New(name)
+		}
+		// Even if t == tmpl, we need to install it in the common.tmpl map.
+		if replace, err := t.associate(tmpl, tree); err != nil {
+			return nil, err
+		} else if replace {
+			tmpl.Tree = tree
+		}
+		tmpl.leftDelim = t.leftDelim
+		tmpl.rightDelim = t.rightDelim
+	}
+	return t, nil
+}
+
+// associate installs the new template into the group of templates associated
+// with t. It is an error to reuse a name except to overwrite an empty
+// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
+	if new.common != t.common {
+		panic("internal error: associate not common")
+	}
+	name := new.name
+	if old := t.tmpl[name]; old != nil {
+		oldIsEmpty := parse.IsEmptyTree(old.Root)
+		newIsEmpty := parse.IsEmptyTree(tree.Root)
+		if newIsEmpty {
+			// Whether old is empty or not, new is empty; no reason to replace old.
+			return false, nil
+		}
+		if !oldIsEmpty {
+			return false, fmt.Errorf("template: redefinition of template %q", name)
+		}
+	}
+	t.tmpl[name] = new
+	return true, nil
+}
diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING
new file mode 100644
index 0000000..2993ec0
--- /dev/null
+++ b/vendor/github.com/alecthomas/units/COPYING
@@ -0,0 +1,19 @@
+Copyright (C) 2014 Alec Thomas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
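The two template files vendored above give the full parse/execute pipeline of the alecthomas/template fork, which mirrors the standard text/template API. A minimal illustrative sketch only, assuming the fork keeps the familiar New/Parse/Execute/Must surface it inherits from text/template; the template text, names, and data struct here are invented for the example:

package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// {{define}} exercises parseDefinition and Tree.add above;
	// {{template}} exercises templateControl.
	const text = `{{define "greet"}}Hello, {{.}}!{{end}}{{template "greet" .Name}}`
	t := template.Must(template.New("demo").Parse(text))
	if err := t.Execute(os.Stdout, struct{ Name string }{"Speedle"}); err != nil {
		panic(err)
	}
}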
diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go new file mode 100644 index 0000000..61d0ca4 --- /dev/null +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -0,0 +1,85 @@ +package units + +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, +// etc.). +type Base2Bytes int64 + +// Base-2 byte units. +const ( + Kibibyte Base2Bytes = 1024 + KiB = Kibibyte + Mebibyte = Kibibyte * 1024 + MiB = Mebibyte + Gibibyte = Mebibyte * 1024 + GiB = Gibibyte + Tebibyte = Gibibyte * 1024 + TiB = Tebibyte + Pebibyte = Tebibyte * 1024 + PiB = Pebibyte + Exbibyte = Pebibyte * 1024 + EiB = Exbibyte +) + +var ( + bytesUnitMap = MakeUnitMap("iB", "B", 1024) + oldBytesUnitMap = MakeUnitMap("B", "B", 1024) +) + +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB +// and KiB are both 1024. +// However "kB", which is the correct SI spelling of 1000 Bytes, is rejected. +func ParseBase2Bytes(s string) (Base2Bytes, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, oldBytesUnitMap) + } + return Base2Bytes(n), err +} + +func (b Base2Bytes) String() string { + return ToString(int64(b), 1024, "iB", "B") +} + +var ( + metricBytesUnitMap = MakeUnitMap("B", "B", 1000) +) + +// MetricBytes are SI byte units (1000 bytes in a kilobyte). +type MetricBytes SI + +// SI base-10 byte units. +const ( + Kilobyte MetricBytes = 1000 + KB = Kilobyte + Megabyte = Kilobyte * 1000 + MB = Megabyte + Gigabyte = Megabyte * 1000 + GB = Gigabyte + Terabyte = Gigabyte * 1000 + TB = Terabyte + Petabyte = Terabyte * 1000 + PB = Petabyte + Exabyte = Petabyte * 1000 + EB = Exabyte +) + +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. +func ParseMetricBytes(s string) (MetricBytes, error) { + n, err := ParseUnit(s, metricBytesUnitMap) + return MetricBytes(n), err +} + +// TODO: represents 1000B as uppercase "KB", while SI standard requires "kB". +func (m MetricBytes) String() string { + return ToString(int64(m), 1000, "B", "B") +} + +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, +// respectively. That is, KiB represents 1024 and kB, KB represent 1000. +func ParseStrictBytes(s string) (int64, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, metricBytesUnitMap) + } + return int64(n), err +} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go new file mode 100644 index 0000000..156ae38 --- /dev/null +++ b/vendor/github.com/alecthomas/units/doc.go @@ -0,0 +1,13 @@ +// Package units provides helpful unit multipliers and functions for Go. +// +// The goal of this package is to have functionality similar to the time [1] package. +// +// +// [1] http://golang.org/pkg/time/ +// +// It allows for code like this: +// +// n, err := ParseBase2Bytes("1KB") +// // n == 1024 +// n = units.Mebibyte * 512 +package units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go new file mode 100644 index 0000000..99b2fa4 --- /dev/null +++ b/vendor/github.com/alecthomas/units/si.go @@ -0,0 +1,50 @@ +package units + +// SI units. +type SI int64 + +// SI unit multiples. 
+const (
+	Kilo SI = 1000
+	Mega    = Kilo * 1000
+	Giga    = Mega * 1000
+	Tera    = Giga * 1000
+	Peta    = Tera * 1000
+	Exa     = Peta * 1000
+)
+
+func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
+	res := map[string]float64{
+		shortSuffix: 1,
+		// see below for "k" / "K"
+		"M" + suffix: float64(scale * scale),
+		"G" + suffix: float64(scale * scale * scale),
+		"T" + suffix: float64(scale * scale * scale * scale),
+		"P" + suffix: float64(scale * scale * scale * scale * scale),
+		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
+	}
+
+	// Standard SI prefixes use lowercase "k" for kilo = 1000.
+	// For compatibility, and to be fool-proof, we accept both "k" and "K" in metric mode.
+	//
+	// However, official binary prefixes are always capitalized - "KiB" -
+	// and we specifically never parse "kB" as 1024B because:
+	//
+	// (1) people pedantic enough to use lowercase according to SI are unlikely to abuse "k" to mean 1024 :-)
+	//
+	// (2) Use of capital K for 1024 was an informal tradition predating IEC prefixes:
+	//     "The binary meaning of the kilobyte for 1024 bytes typically uses the symbol KB, with an
+	//     uppercase letter K."
+	//     -- https://en.wikipedia.org/wiki/Kilobyte#Base_2_(1024_bytes)
+	//     "Capitalization of the letter K became the de facto standard for binary notation, although this
+	//     could not be extended to higher powers, and use of the lowercase k did persist.[13][14][15]"
+	//     -- https://en.wikipedia.org/wiki/Binary_prefix#History
+	//     See also the extensive https://en.wikipedia.org/wiki/Timeline_of_binary_prefixes.
+	if scale == 1024 {
+		res["K"+suffix] = float64(scale)
+	} else {
+		res["k"+suffix] = float64(scale)
+		res["K"+suffix] = float64(scale)
+	}
+	return res
+}
diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go
new file mode 100644
index 0000000..6527e92
--- /dev/null
+++ b/vendor/github.com/alecthomas/units/util.go
@@ -0,0 +1,138 @@
+package units
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
+)
+
+func ToString(n int64, scale int64, suffix, baseSuffix string) string {
+	mn := len(siUnits)
+	out := make([]string, mn)
+	for i, m := range siUnits {
+		if n%scale != 0 || i == 0 && n == 0 {
+			s := suffix
+			if i == 0 {
+				s = baseSuffix
+			}
+			out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
+		}
+		n /= scale
+		if n == 0 {
+			break
+		}
+	}
+	return strings.Join(out, "")
+}
+
+// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
+var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
+
+// leadingInt consumes the leading [0-9]* from s.
+func leadingInt(s string) (x int64, rem string, err error) {
+	i := 0
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c < '0' || c > '9' {
+			break
+		}
+		if x >= (1<<63-10)/10 {
+			// overflow
+			return 0, "", errLeadingInt
+		}
+		x = x*10 + int64(c) - '0'
+	}
+	return x, s[i:], nil
+}
+
+func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
+	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
+	orig := s
+	f := float64(0)
+	neg := false
+
+	// Consume [-+]?
+	if s != "" {
+		c := s[0]
+		if c == '-' || c == '+' {
+			neg = c == '-'
+			s = s[1:]
+		}
+	}
+	// Special case: if all that is left is "0", this is zero.
+ if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("units: invalid " + orig) + } + for s != "" { + g := float64(0) // this element of the sequence + + var x int64 + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { + return 0, errors.New("units: invalid " + orig) + } + // Consume [0-9]* + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + g = float64(x) + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' { + s = s[1:] + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + scale := 1.0 + for n := pl - len(s); n > 0; n-- { + scale *= 10 + } + g += float64(x) / scale + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. ".s" or "-.s") + return 0, errors.New("units: invalid " + orig) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || ('0' <= c && c <= '9') { + break + } + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("units: unknown unit " + u + " in " + orig) + } + + f += g * unit + } + + if neg { + f = -f + } + if f < float64(-1<<63) || f > float64(1<<63-1) { + return 0, errors.New("units: overflow parsing unit") + } + return int64(f), nil +} diff --git a/vendor/github.com/coreos/etcd/Documentation/README.md b/vendor/github.com/coreos/etcd/Documentation/README.md new file mode 120000 index 0000000..8828313 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Documentation/README.md @@ -0,0 +1 @@ +docs.md \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/etcd b/vendor/github.com/coreos/etcd/cmd/etcd new file mode 120000 index 0000000..b870225 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/etcd @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/etcdctl b/vendor/github.com/coreos/etcd/cmd/etcdctl new file mode 120000 index 0000000..05bb269 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/etcdctl @@ -0,0 +1 @@ +../etcdctl \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/functional b/vendor/github.com/coreos/etcd/cmd/functional new file mode 120000 index 0000000..44faa31 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/functional @@ -0,0 +1 @@ +../functional \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/tools b/vendor/github.com/coreos/etcd/cmd/tools new file mode 120000 index 0000000..4887d6e --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/tools @@ -0,0 +1 @@ +../tools \ No newline at end of file diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md new file mode 100644 index 0000000..2abf98e --- /dev/null +++ b/vendor/github.com/go-stack/stack/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Chris Hines + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be 
included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go
new file mode 100644
index 0000000..ac3b93b
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/stack.go
@@ -0,0 +1,400 @@
+// +build go1.7
+
+// Package stack implements utilities to capture, manipulate, and format call
+// stacks. It provides a simpler API than package runtime.
+//
+// The implementation takes care of the minutia and special cases of
+// interpreting the program counter (pc) values returned by runtime.Callers.
+//
+// Package stack's types implement fmt.Formatter, which provides a simple and
+// flexible way to declaratively configure formatting when used with logging
+// or error tracking packages.
+package stack

+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Call records a single function invocation from a goroutine stack.
+type Call struct {
+	frame runtime.Frame
+}
+
+// Caller returns a Call from the stack of the current goroutine. The argument
+// skip is the number of stack frames to ascend, with 0 identifying the
+// calling function.
+func Caller(skip int) Call {
+	// As of Go 1.9 we need room for up to three PC entries.
+	//
+	// 0. An entry for the stack frame prior to the target to check for
+	//    special handling needed if that prior entry is runtime.sigpanic.
+	// 1. A possible second entry to hold metadata about skipped inlined
+	//    functions. If inline functions were not skipped the target frame
+	//    PC will be here.
+	// 2. A third entry for the target frame PC when the second entry
+	//    is used for skipped inline functions.
+	var pcs [3]uintptr
+	n := runtime.Callers(skip+1, pcs[:])
+	frames := runtime.CallersFrames(pcs[:n])
+	frame, _ := frames.Next()
+	frame, _ = frames.Next()
+
+	return Call{
+		frame: frame,
+	}
+}
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+	return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+	if c.frame == (runtime.Frame{}) {
+		return nil, ErrNoFunc
+	}
+
+	buf := bytes.Buffer{}
+	fmt.Fprint(&buf, c)
+	return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
+// cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+//    %s    source file
+//    %d    line number
+//    %n    function name
+//    %k    last segment of the package path
+//    %v    equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+//    %+s   path of source file relative to the compile time GOPATH,
+//          or the module path joined to the path of source file relative
+//          to module root
+//    %#s   full path of source file
+//    %+n   import path qualified function name
+//    %+k   full package path
+//    %+v   equivalent to %+s:%d
+//    %#v   equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+	if c.frame == (runtime.Frame{}) {
+		fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+		return
+	}
+
+	switch verb {
+	case 's', 'v':
+		file := c.frame.File
+		switch {
+		case s.Flag('#'):
+			// done
+		case s.Flag('+'):
+			file = pkgFilePath(&c.frame)
+		default:
+			const sep = "/"
+			if i := strings.LastIndex(file, sep); i != -1 {
+				file = file[i+len(sep):]
+			}
+		}
+		io.WriteString(s, file)
+		if verb == 'v' {
+			buf := [7]byte{':'}
+			s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10))
+		}
+
+	case 'd':
+		buf := [6]byte{}
+		s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10))
+
+	case 'k':
+		name := c.frame.Function
+		const pathSep = "/"
+		start, end := 0, len(name)
+		if i := strings.LastIndex(name, pathSep); i != -1 {
+			start = i + len(pathSep)
+		}
+		const pkgSep = "."
+		if i := strings.Index(name[start:], pkgSep); i != -1 {
+			end = start + i
+		}
+		if s.Flag('+') {
+			start = 0
+		}
+		io.WriteString(s, name[start:end])
+
+	case 'n':
+		name := c.frame.Function
+		if !s.Flag('+') {
+			const pathSep = "/"
+			if i := strings.LastIndex(name, pathSep); i != -1 {
+				name = name[i+len(pathSep):]
+			}
+			const pkgSep = "."
+			if i := strings.Index(name, pkgSep); i != -1 {
+				name = name[i+len(pkgSep):]
+			}
+		}
+		io.WriteString(s, name)
+	}
+}
+
+// Frame returns the call frame information for the Call.
+func (c Call) Frame() runtime.Frame {
+	return c.frame
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+//
+// Deprecated: Use Call.Frame instead.
+func (c Call) PC() uintptr {
+	return c.frame.PC
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string {
+	return fmt.Sprint(cs)
+}
+
+var (
+	openBracketBytes  = []byte("[")
+	closeBracketBytes = []byte("]")
+	spaceBytes        = []byte(" ")
+)
+
+// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
+// same as fmt.Sprintf("%v", cs).
+func (cs CallStack) MarshalText() ([]byte, error) {
+	buf := bytes.Buffer{}
+	buf.Write(openBracketBytes)
+	for i, pc := range cs {
+		if i > 0 {
+			buf.Write(spaceBytes)
+		}
+		fmt.Fprint(&buf, pc)
+	}
+	buf.Write(closeBracketBytes)
+	return buf.Bytes(), nil
+}
+
+// Format implements fmt.Formatter by printing the CallStack as square brackets
+// ([, ]) surrounding a space separated list of Calls each formatted with the
+// supplied verb and options.
+func (cs CallStack) Format(s fmt.State, verb rune) {
+	s.Write(openBracketBytes)
+	for i, pc := range cs {
+		if i > 0 {
+			s.Write(spaceBytes)
+		}
+		pc.Format(s, verb)
+	}
+	s.Write(closeBracketBytes)
+}
+
+// Trace returns a CallStack for the current goroutine with element 0
+// identifying the calling function.
+func Trace() CallStack {
+	var pcs [512]uintptr
+	n := runtime.Callers(1, pcs[:])
+
+	frames := runtime.CallersFrames(pcs[:n])
+	cs := make(CallStack, 0, n)
+
+	// Skip extra frame retrieved just to make sure the runtime.sigpanic
+	// special case is handled.
+ frame, more := frames.Next() + + for more { + frame, more = frames.Next() + cs = append(cs, Call{frame: frame}) + } + + return cs +} + +// TrimBelow returns a slice of the CallStack with all entries below c +// removed. +func (cs CallStack) TrimBelow(c Call) CallStack { + for len(cs) > 0 && cs[0] != c { + cs = cs[1:] + } + return cs +} + +// TrimAbove returns a slice of the CallStack with all entries above c +// removed. +func (cs CallStack) TrimAbove(c Call) CallStack { + for len(cs) > 0 && cs[len(cs)-1] != c { + cs = cs[:len(cs)-1] + } + return cs +} + +// pkgIndex returns the index that results in file[index:] being the path of +// file relative to the compile time GOPATH, and file[:index] being the +// $GOPATH/src/ portion of file. funcName must be the name of a function in +// file as returned by runtime.Func.Name. +func pkgIndex(file, funcName string) int { + // As of Go 1.6.2 there is no direct way to know the compile time GOPATH + // at runtime, but we can infer the number of path segments in the GOPATH. + // We note that runtime.Func.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // file[:idx] == /home/user/src/ + // file[idx:] == pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired result for file[idx:]. We count separators from the + // end of the file path until it finds two more than in the function name + // and then move one character forward to preserve the initial path + // segment without a leading separator. + const sep = "/" + i := len(file) + for n := strings.Count(funcName, sep) + 2; n > 0; n-- { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + return i + len(sep) +} + +// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, +// or its module path joined to its path relative to the module root. +// +// As of Go 1.11 there is no direct way to know the compile time GOPATH or +// module paths at runtime, but we can piece together the desired information +// from available information. We note that runtime.Frame.Function contains the +// function name qualified by the package path, which includes the module path +// but not the GOPATH. We can extract the package path from that and append the +// last segments of the file path to arrive at the desired package qualified +// file path. For example, given: +// +// GOPATH /home/user +// import path pkg/sub +// frame.File /home/user/src/pkg/sub/file.go +// frame.Function pkg/sub.Type.Method +// Desired return pkg/sub/file.go +// +// It appears that we simply need to trim ".Type.Method" from frame.Function and +// append "/" + path.Base(file). +// +// But there are other wrinkles. Although it is idiomatic to do so, the internal +// name of a package is not required to match the last segment of its import +// path. In addition, the introduction of modules in Go 1.11 allows working +// without a GOPATH. 
So we also must make these work right: +// +// GOPATH /home/user +// import path pkg/go-sub +// package name sub +// frame.File /home/user/src/pkg/go-sub/file.go +// frame.Function pkg/sub.Type.Method +// Desired return pkg/go-sub/file.go +// +// Module path pkg/v2 +// import path pkg/v2/go-sub +// package name sub +// frame.File /home/user/cloned-pkg/go-sub/file.go +// frame.Function pkg/v2/sub.Type.Method +// Desired return pkg/v2/go-sub/file.go +// +// We can handle all of these situations by using the package path extracted +// from frame.Function up to, but not including, the last segment as the prefix +// and the last two segments of frame.File as the suffix of the returned path. +// This preserves the existing behavior when working in a GOPATH without modules +// and a semantically equivalent behavior when used in module aware project. +func pkgFilePath(frame *runtime.Frame) string { + pre := pkgPrefix(frame.Function) + post := pathSuffix(frame.File) + if pre == "" { + return post + } + return pre + "/" + post +} + +// pkgPrefix returns the import path of the function's package with the final +// segment removed. +func pkgPrefix(funcName string) string { + const pathSep = "/" + end := strings.LastIndex(funcName, pathSep) + if end == -1 { + return "" + } + return funcName[:end] +} + +// pathSuffix returns the last two segments of path. +func pathSuffix(path string) string { + const pathSep = "/" + lastSep := strings.LastIndex(path, pathSep) + if lastSep == -1 { + return path + } + return path[strings.LastIndex(path[:lastSep], pathSep)+1:] +} + +var runtimePath string + +func init() { + var pcs [3]uintptr + runtime.Callers(0, pcs[:]) + frames := runtime.CallersFrames(pcs[:]) + frame, _ := frames.Next() + file := frame.File + + idx := pkgIndex(frame.File, frame.Function) + + runtimePath = file[:idx] + if runtime.GOOS == "windows" { + runtimePath = strings.ToLower(runtimePath) + } +} + +func inGoroot(c Call) bool { + file := c.frame.File + if len(file) == 0 || file[0] == '?' { + return true + } + if runtime.GOOS == "windows" { + file = strings.ToLower(file) + } + return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") +} + +// TrimRuntime returns a slice of the CallStack with the topmost entries from +// the go runtime removed. It considers any calls originating from unknown +// files, files under GOROOT, or _testmain.go as part of the runtime. +func (cs CallStack) TrimRuntime() CallStack { + for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { + cs = cs[:len(cs)-1] + } + return cs +} diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 0000000..f10b49b --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,16 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. 
+Jan Mercl <0xjnml@gmail.com> +Klaus Post +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 0000000..3bd40cf --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,38 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Klaus Post +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 0000000..6050c10 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
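The snappy files that follow implement the block format behind the package's Encode and Decode entry points. A hedged round-trip sketch of those two functions as documented in the vendored sources below; the sample data is invented and error handling is kept minimal:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("speedle policy "), 100)

	// Passing nil lets Encode allocate a destination buffer itself.
	compressed := snappy.Encode(nil, src)

	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round trip ok: %v\n",
		len(src), len(compressed), bytes.Equal(src, decompressed))
}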
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000..f1e04b1 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,241 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000..e6179f6 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). 
+ CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. 
+ // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together mean that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000..b88318e --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000..7f23657 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
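// Editorial sketch, not part of the vendored file: binary.PutUvarint writes
// the length in little-endian base-128 groups with a continuation bit, so a
// 300-byte input starts with the two header bytes 0xac 0x02 (300 = 0x12c;
// 0x2c with the continuation bit set is 0xac, and 300>>7 == 2):
//
//	var hdr [binary.MaxVarintLen64]byte
//	n := binary.PutUvarint(hdr[:], 300) // n == 2, hdr[:2] == {0xac, 0x02}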
+ d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
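// Editorial aside: that 6-bytes-in, 7-bytes-out worst case is a 7/6 blowup,
// which is where the n/6 term in the estimate just below comes from.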
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
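// Editorial usage sketch for the two constructors above, illustrative only
// (assumes a data []byte in scope):
//
//	var buf bytes.Buffer
//	w := snappy.NewBufferedWriter(&buf)
//	if _, err := w.Write(data); err != nil {
//		log.Fatal(err)
//	}
//	if err := w.Close(); err != nil { // Close flushes the final chunk.
//		log.Fatal(err)
//	}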
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
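// Editorial note: the //go:noescape declarations in this file are implemented
// by the assembly in encode_amd64.s; building for any other GOARCH, or with
// -tags noasm, compiles the pure Go bodies in encode_other.go instead, as
// selected by the build tags at the top of each file.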
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
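// Editorial worked example for the BSF step above: if the two 8-byte words
// first differ at byte index 3, bytes 0-2 are equal so bits 0-23 of the XOR
// are zero, the lowest set bit falls in bits 24-31, BSFQ returns a value in
// [24, 32), and the SHRQ $3 turns that into the byte index 3.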
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes.
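// Editorial worked example, not upstream code: emitCopy(dst, 100, 67) skips
// the length >= 68 loop, takes the length > 64 branch (a 3-byte length-60
// tagCopy2, leaving length 7), and since 7 < 12 and 100 < 2048 falls through
// to the 2-byte emit below: (100>>8)<<5 | (7-4)<<2 | tagCopy1 = 0x0d, then
// uint8(100) = 0x64. Five bytes in total, versus six for a 64+3 split.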
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000..ece692e --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. 
All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
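// Editorial check of that equivalence: MaxEncodedLen(65536) =
// 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490, the constant below.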
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md new file mode 120000 index 0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md new file mode 120000 index 
0000000..71bfc07 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md @@ -0,0 +1 @@ +DOC.md \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000..1eb75ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 0000000..f65eb39 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. 
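// Editorial note: the stream is consumed back to front; init above treats the
// highest set bit of the final byte as a sentinel marking the end of the
// stream, and getBitsFast below shifts value left by bitsRead before taking
// the top n bits, so bits are returned from the high end of the container,
// which fill() replenishes from the low end.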
+func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 0000000..43e4636 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
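// Editorial note on the "flushed recently" contract above: the container is
// 64 bits, flush() below leaves at most 7 bits pending, so up to three
// 16-bit writes (7 + 3*16 = 55 bits) always fit between flushes, while
// flush32() leaves at most 31 pending, after which two more fit.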
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 0000000..abade2d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
new file mode 100644
index 0000000..b69237c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -0,0 +1,684 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compress the input bytes. Input must be < 2GB.
+// Provide a Scratch buffer to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+// If the input is too hard to compress, ErrIncompressible is returned.
+// If the input is a single byte value repeated, ErrUseRLE is returned.
+func Compress(in []byte, s *Scratch) ([]byte, error) {
+	if len(in) <= 1 {
+		return nil, ErrIncompressible
+	}
+	if len(in) > (2<<30)-1 {
+		return nil, errors.New("input too big, must be < 2GB")
+	}
+	s, err := s.prepare(in)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	if maxCount == 0 {
+		maxCount = s.countSimple(in)
+	}
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount == len(in) {
+		// One symbol, use RLE
+		return nil, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
+		return nil, ErrIncompressible
+	}
+	s.optimalTableLog()
+	err = s.normalizeCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.writeCount()
+	if err != nil {
+		return nil, err
+	}
+
+	if false {
+		err = s.validateNorm()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = s.buildCTable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.compress(in)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.bw.out
+	// Check if we compressed.
+	if len(s.Out) >= len(in) {
+		return nil, ErrIncompressible
+	}
+	return s.Out, nil
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
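+// Concretely, nbBitsOut rounds first.deltaNbBits to a whole bit count via
+// (deltaNbBits + 1<<15) >> 16, and the derived index into stateTable selects
+// the starting state; unlike encode, no bits are written to the stream.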
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. 
+// This is read back by readNCount.
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if uint16(charnum) > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human-readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < int(tableSize) {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
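+// For example, with 1000 bytes remaining and 40 distinct symbols:
+// minBitsSrc = highBits(999) + 1 = 10 and minBitsSymbols = highBits(39) + 2 = 7,
+// so the minimum table log is 7.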
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
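+// normalizeCount falls back to this method when the unassigned probability
+// mass (-stillToDistribute) reaches half of the largest symbol's share.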
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
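+// The absolute values of s.norm (low-probability symbols are stored as -1 and
+// counted as 1) must sum to exactly 1 << s.actualTableLog.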
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[:s.symbolLen] {
+		if v == 0 && s.count[i] > 0 {
+			fmt.Printf("WARNING: Symbol %d has count %d, norm %d\n", i, s.count[i], v)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -0,0 +1,374 @@
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no way guaranteed, that corrupt data will
+// return an error.
+// It is up to the caller to verify integrity of the returned data.
+// Use a predefined Scratch to set maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < int(tableSize) { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = uint16(in.getBits(tableLog)) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 0000000..535cbad --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next
+	// compression/decompression step and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0, decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // true if a symbol has probability > 50%, so states may emit zero bits.
+	clearCount     bool   // clear counts before the next use
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram returns the histogram so the caller can populate it and skip that
+// step during compression, or inspect it once compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol and the count of the most
+// populated symbol. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the count of the most populated symbol.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
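+// For example, tableStep(2048) = 1024 + 256 + 3 = 1283. The step is odd for
+// every legal table size, so it is co-prime with the power-of-two table size
+// and stepping with it modulo that size visits each slot exactly once.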
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 0000000..a4979e8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,329 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +func (b *bitReader) advance(n uint8) { + b.bitsRead += n +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReader reads a bitstream in reverse. 
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
type bitReaderBytes struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekByteFast requires that at least one byte has been loaded.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+	got := uint8(b.value >> 56)
+	return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+	b.bitsRead += n
+	b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value |= uint64(low) << (b.bitsRead - 32)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value |= uint64(low) << (b.bitsRead - 32)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 0000000..bda4021 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,197 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. 
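+// (b.nBits + 7) >> 3 rounds the pending bit count up to whole bytes, so e.g.
+// 13 pending bits are flushed as 2 bytes.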
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 0000000..50bcdf6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 0000000..0843cb0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,651 @@
+package huff0
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similar to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
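+//
+// A minimal usage sketch (assuming the ReadTable and Decompress4X helpers
+// declared elsewhere in this package):
+//
+//	var s huff0.Scratch
+//	comp, _, err := huff0.Compress4X(data, &s)
+//	if err == huff0.ErrIncompressible || err == huff0.ErrUseRLE {
+//		// Store the block raw or as RLE instead.
+//	}
+//	dec, remain, err := huff0.ReadTable(comp, nil)
+//	if err == nil {
+//		out, _ := dec.Decompress4X(remain, len(data))
+//		_ = out
+//	}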
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + + if s.Reuse == ReusePolicyPrefer && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
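+// The reuse result reports whether every symbol present in the input also has
+// a code (nBits != 0) in s.prevTable, i.e. whether the previous table could
+// encode this input.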
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := int16(startNode) + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
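+	// huffNode0[0] acts as a sentinel: its count is set to 1 << 31 below
+	// ("fake entry, strong barrier"), so the lowS/lowN comparisons can never
+	// run past the start of the list.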
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
+	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].count = 1 << 30
+	}
+	// fake entry, strong barrier
+	huffNode0[0].count = 1 << 31
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
+		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].nbBits = 0
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol].nBits = v.nbBits
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
+
+// huffSort will sort symbols, decreasing order.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := uint8(maxNbBits) + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 0000000..a03b263 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1146 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for decoding input using this table. 
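+// The first header byte selects the table encoding: values of 128 and above
+// mean raw 4-bit weights follow, lower values give the byte length of an
+// FSE-compressed weight stream.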
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) <= int(iSize) { + return s, nil, errors.New("input too small for table") + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + rank := &rankStats[w] + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. 
+func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
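+	// The fast loop above decoded four symbols per iteration through the
+	// 256-byte staging buffer; the loop below drains the remaining bits of
+	// the stream one symbol at a time.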
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + shift := (8 - d.actualTableLog) & 7 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
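+	// The 256-byte buffer below is flushed to dst each time off wraps to zero.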
+ var buf [256]byte + var off uint8 + + const shift = 0 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := br.off*8 + uint(64-br.bitsRead) + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (8 - d.actualTableLog) & 7 + + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = 
single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
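+// This variant assumes actualTableLog == 8, so table lookups use an
+// unshifted byte peek.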
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 0 + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + 
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+			off = 0
+			out = out[bufoff:]
+			decoded += 256
+			// There must at least be 3 buffers left.
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 0000000..177d6c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,260 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy controls if and how a previously built table may be reused.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+)
+
+// Scratch provides reusable state and buffers for compression and decompression.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	huffWeight     [maxSymbolValue + 1]byte
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.br.init(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
+	hist := s.fse.Histogram()
+	hist = hist[:256]
+	for i := range hist[:16] {
+		hist[i] = 0
+	}
+	for n := uint8(0); n < maxSymbolValue; n++ {
+		v := bitsToWeight[c[n].nBits] & 15
+		huffWeight[n] = v
+		hist[v]++
+	}
+
+	// FSE compress if feasible.
+	if maxSymbolValue >= 2 {
+		huffMaxCnt := uint32(0)
+		huffMax := uint8(0)
+		for i, v := range hist[:16] {
+			if v == 0 {
+				continue
+			}
+			huffMax = byte(i)
+			if v > huffMaxCnt {
+				huffMaxCnt = v
+			}
+		}
+		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+		s.fse.TableLog = maxFSETableLog
+		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+		if err == nil && len(b) < int(s.symbolLen>>1) {
+			s.Out = append(s.Out, uint8(len(b)))
+			s.Out = append(s.Out, b...)
+ return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE new file mode 100644 index 0000000..1d2d645 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
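For reviewers unfamiliar with the vendored huff0 package above, a minimal, illustrative round trip looks like the sketch below. It is not part of this patch and uses only the exported identifiers defined in the files above:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/klauspost/compress/huff0"
    )

    func main() {
        // Skewed data compresses well with a Huffman table.
        in := bytes.Repeat([]byte("aaaaabbbcc"), 16)

        // Compress1X emits the serialized table followed by the compressed stream.
        var s huff0.Scratch
        comp, _, err := huff0.Compress1X(in, &s)
        if err != nil {
            // ErrIncompressible or ErrUseRLE may be returned here.
            panic(err)
        }

        // ReadTable parses the table header and returns the remaining payload.
        ds, data, err := huff0.ReadTable(comp, nil)
        if err != nil {
            panic(err)
        }
        ds.MaxDecodedSize = len(in) // bound the decoder's output
        out, err := ds.Decompress1X(data)
        if err != nil {
            panic(err)
        }
        fmt.Println(bytes.Equal(out, in)) // true
    }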
diff --git a/vendor/github.com/klauspost/compress/s2/cmd/internal/readahead/LICENSE b/vendor/github.com/klauspost/compress/s2/cmd/internal/readahead/LICENSE new file mode 100644 index 0000000..eaeb61a --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/cmd/internal/readahead/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 0000000..bcfa195 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 0000000..931ae31 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. 
+ +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 0000000..6050c10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 0000000..72efb03 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. 
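+// The length header is a uvarint, so headerLen is between 1 and 5 bytes.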
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 0000000..1c66e37 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,482 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
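+	// Worked example (illustrative): if length == 3, the MOVOU pair below
+	// still moves 16 bytes, but the ADDQs advance DI and SI by only 3, so
+	// subsequent iterations overwrite the 13 overrun bytes with the correct
+	// data at the correct positions.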
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + CMPQ SI, R13 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT slowForwardCopy
+	CMPQ DX, $8
+	JLT slowForwardCopy
+	CMPQ R14, $16
+	JLT slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load/stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 0000000..94a96c5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 0000000..8d393e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. 
On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
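+		// Sketch of that header (offsets relative to the chunk start, which
+		// here begins at len(magicChunk) in obuf): one chunk-type byte and a
+		// 3-byte little-endian length (chunkLen, which counts the 4 checksum
+		// bytes plus the payload), followed by the checksum that opens the
+		// chunk body.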
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
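+//
+// For example, the six BYTE lines
+//	BYTE $0x4e; BYTE $0x0f; BYTE $0xb7; BYTE $0x7c; BYTE $0x5c; BYTE $0x78
+// further down in this file are the hand-encoded form of
+//	MOVWQZX table-32768(SP)(R11*2), R15
+// exactly as the adjacent XXX comments document.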
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
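+	//
+	// (The Go version of that comment, in encode_other.go, spells the trick
+	// out in full: a single load64 at s-1 supplies the three hash inputs for
+	// positions s-1, s and s+1, which the instructions below mirror.)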
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
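+			// Concretely, x holds the 8 bytes src[s-1 : s+7], so
+			// uint32(x>>0) == load32(src, s-1), uint32(x>>8) == load32(src, s)
+			// and uint32(x>>16) == load32(src, s+1).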
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 0000000..74a3668 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. 
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 0000000..8544585
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint   // next byte to read is at in[off - 1]
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+	if n == 0 /*|| b.bitsRead >= 64 */ {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) int {
+	const regMask = 64 - 1
+	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return int(v)
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
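+	// init() only takes this path when len(in) >= 8 and b.off == len(in),
+	// so the 8-byte little-endian load from the tail is always in bounds.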
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 0000000..303ae90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 0000000..c8ec6e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,739 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
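+//
+// This file decodes a single block: the literals section (raw, RLE,
+// Huffman compressed, or treeless) followed by the FSE-coded sequences.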
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+	blockTypeRaw blockType = iota
+	blockTypeRLE
+	blockTypeCompressed
+	blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+	literalsBlockRaw literalsBlockType = iota
+	literalsBlockRLE
+	literalsBlockCompressed
+	literalsBlockTreeless
+)
+
+const (
+	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+	maxCompressedBlockSize = 128 << 10
+
+	// Maximum possible block size (all Raw+Uncompressed).
+	maxBlockSize = (1 << 21) - 1
+
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+	maxCompressedLiteralSize = 1 << 18
+	maxRLELiteralSize        = 1 << 20
+	maxMatchLen              = 131074
+	maxSequences             = 0x7f00 + 0xffff
+
+	// We support slightly less than the reference decoder to be able to
+	// use ints on 32 bit archs.
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Frame to use for singlethreaded decoding.
+	// Should not be used by the decoder itself since parent may be another frame.
+	localFrame *frameDec
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem bool
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem:  lowMem,
+		result:  make(chan decodeOutput, 1),
+		input:   make(chan struct{}, 1),
+		history: make(chan *history, 1),
+	}
+	b.decWG.Add(1)
+	go b.startDecoder()
+	return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp := br.readSmall(3)
+	if tmp == nil {
+		if debug {
+			println("Reading block header:", io.ErrUnexpectedEOF)
+		}
+		return io.ErrUnexpectedEOF
+	}
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
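+	// The 24-bit header is: bit 0 = last, bits 1-2 = type, bits 3-23 = size.
+	// For example, header bytes {0x21, 0x03, 0x00} give bh = 0x321:
+	// Last = true, Type = blockTypeRaw, size = 100.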
+	cSize := int(bh >> 3)
+	maxSize := maxBlockSize
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		b.RLESize = uint32(cSize)
+		if b.lowMem {
+			maxSize = cSize
+		}
+		cSize = 1
+	case blockTypeCompressed:
+		if debug {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		maxSize = maxCompressedBlockSize
+		if windowSize < maxCompressedBlockSize && b.lowMem {
+			maxSize = int(windowSize)
+		}
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debug {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+	case blockTypeRaw:
+		b.RLESize = 0
+		// We do not need a destination for raw blocks.
+		maxSize = -1
+	default:
+		panic("Invalid block type")
+	}
+
+	// Read block data.
+	if cap(b.dataStorage) < cSize {
+		if b.lowMem {
+			b.dataStorage = make([]byte, 0, cSize)
+		} else {
+			b.dataStorage = make([]byte, 0, maxBlockSize)
+		}
+	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
+	var err error
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debug {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	return nil
+}
+
+// sendErr will make the decoder report this error on the current frame.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+	b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+	close(b.input)
+	close(b.history)
+	close(b.result)
+	b.decWG.Wait()
+}
+
+// startDecoder runs in the background and decodes a block whenever it
+// receives input. Output and history are delivered on separate channels.
+func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
+	for range b.input {
+		//println("blockDec: Got block input")
+		switch b.Type {
+		case blockTypeRLE:
+			if cap(b.dst) < int(b.RLESize) {
+				if b.lowMem {
+					b.dst = make([]byte, b.RLESize)
+				} else {
+					b.dst = make([]byte, maxBlockSize)
+				}
+			}
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst[:b.RLESize],
+				err: nil,
+			}
+			v := b.data[0]
+			for i := range o.b {
+				o.b[i] = v
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeRaw:
+			o := decodeOutput{
+				d:   b,
+				b:   b.data,
+				err: nil,
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeCompressed:
+			b.dst = b.dst[:0]
+			err := b.decodeCompressed(nil)
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst,
+				err: err,
+			}
+			if debug {
+				println("Decompressed to", len(b.dst), "bytes, error:", err)
+			}
+			b.result <- o
+		case blockTypeReserved:
+			// Used for returning errors.
+			<-b.history
+			b.result <- decodeOutput{
+				d:   b,
+				b:   nil,
+				err: b.err,
+			}
+		default:
+			panic("Invalid block type")
+		}
+		if debug {
+			println("blockDec: Finished block")
+		}
+	}
+}
+
+// decodeBuf decodes the block using the supplied history,
+// so the history is not fetched from the channel.
+func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + b.dst = hist.b + hist.b = nil + err := b.decodeCompressed(hist) + if debug { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + hist.b = b.dst + b.dst = saved + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +// decodeCompressed will start decompressing a block. +// If no history is supplied the decoder will decodeAsync as much as possible +// before fetching from blockDec.history +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + delayedHistory := hist == nil + + if delayedHistory { + // We must always grab history. + defer func() { + if hist == nil { + <-b.history + } + }() + } + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return ErrBlockTooSmall + } + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
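+			// The 3 header bytes hold 4 format bits plus a 20-bit field n;
+			// the regenerated size is n & 1023 and the compressed size n >> 10.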
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debug { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debug { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debug { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. 
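+	// Decompress into b.literalBuf (length 0, capacity litRegenSize); the
+	// stream count must match the size format parsed from the header above.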
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debug { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debug { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debug { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debug { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literials before the byte history is available. 
+ // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. + // So not much obvious gain here. + + if hist.huffTree == nil { + return errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. + huff = hist.huffTree + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return err + } + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + } else { + if hist.huffTree != nil && huff != nil { + if hist.dict == nil || hist.dict.litDec != hist.huffTree { + huffDecoderPool.Put(hist.huffTree) + } + hist.huffTree = nil + } + } + if huff != nil { + hist.huffTree = huff + } + if debug { + println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + } + + if nSeqs == 0 { + // Decompressed content is defined entirely as Literals Section content. + b.dst = append(b.dst, literals...) + if delayedHistory { + hist.append(literals) + } + return nil + } + + seqs, err := seqs.mergeHistory(&hist.decoders) + if err != nil { + return err + } + if debug { + println("History merged ok") + } + br := &bitReader{} + if err := br.init(in); err != nil { + return err + } + + // TODO: Investigate if sending history without decoders are faster. + // This would allow the sequences to be decoded async and only have to construct stream history. + // If only recent offsets were not transferred, this would be an obvious win. + // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history any more. + if hist.dict != nil { + hist.dict.content = nil + } + } + + if err := seqs.initialize(br, hist, literals, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + err = seqs.decode(nSeqs, br, hbytes) + if err != nil { + return err + } + if !br.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) + } + + err = br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = seqs.out + seqs.out, seqs.literals, seqs.hist = nil, nil, nil + + if !delayedHistory { + // If we don't have delayed history, no need to update. + hist.recentOffsets = seqs.prevOffset + return nil + } + if b.Last { + // if last block we don't care about history. 
+ println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debug { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 0000000..4f0eba2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,837 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + wr bitWriter + + extraLits int + last bool + + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if cap(b.literals) < maxCompressedLiteralSize { + b.literals = make([]byte, 0, maxCompressedLiteralSize) + } + const defSeqs = 200 + b.literals = b.literals[:0] + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. 
+func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debug { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debug { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. 
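+// That is the number of header bytes appendTo will emit (1 to 5),
+// kept in the top four bits of the header value.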
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debug {
+		println("Adding RAW block, length", len(a))
+	}
+}
+
+// encodeRawTo appends a raw block representation of src to dst and returns the result.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src))
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits(raw bool) error {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(b.literals)))
+
+	// Don't compress extremely small blocks
+	if len(b.literals) < 32 || raw {
+		if debug {
+			println("Adding RAW block, length", len(b.literals))
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, b.literals...)
+		return nil
+	}
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if len(b.literals) >= 1024 {
+		// Use 4 Streams.
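+		// Four-stream Huffman costs a 6-byte jump table but decompresses
+		// faster, so it is only picked for larger (>= 1KB) literal blocks.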
+ out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debug { + println("Adding RAW block, length", len(b.literals)) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, b.literals...) + return nil + case huff0.ErrUseRLE: + if debug { + println("Adding RLE block, length", len(b.literals)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + return nil + default: + return err + case nil: + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debug { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(b.literals), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +func (b *blockEnc) encode(raw bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(raw) + } + // We want some difference + if len(b.literals) > (b.size - (b.size >> 5)) { + return errIncompressible + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) 
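+		// The literals are stored uncompressed; the block itself stays
+		// blockTypeCompressed since the sequences section is still emitted.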
+ if debug { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debug { + println("Adding literals RLE") + } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err + case nil: + // Compressed litLen... + if reUsed { + if debug { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debug { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debug { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debug { + println("Adding literals compressed") + } + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debug { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debug { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debug { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debug { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debug { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debug { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debug { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debug { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { + // No need to flush (common) + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is 8 for all. 
+ of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // We checked that all can stay within 32 bits + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } else { + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is below 8 for each. + of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // ml+ll = max 32 bits total + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + err = wr.close() + if err != nil { + return err + } + b.output = wr.out + + if len(b.output)-3-bhOffset >= b.size { + // Maybe even add a bigger margin. + b.litEnc.Reuse = huff0.ReusePolicyNone + return errIncompressible + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debug { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram()[:256] + ofH := b.coders.ofEnc.Histogram()[:256] + mlH := b.coders.mlEnc.Histogram()[:256] + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i, seq := range b.sequences { + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + b.sequences[i] = seq + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 0000000..01a01e4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. 
+ +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 0000000..658ef78 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "io/ioutil" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns nil if no more input is available. + readSmall(n int) []byte + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. 
+ readByte() (byte, error) + + // Skip n bytes. + skipN(n int) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil + } + r := bb[:n] + *b = bb[n:] + return r +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) remain() []byte { + return *b +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int) error { + bb := *b + if len(bb) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if n2 != n { + if debug { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil + } + return r.tmp[:n] +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 0000000..2c4fca1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
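+// If fewer than 4 bytes remain, the value is assembled little endian
+// from just the remaining bytes, with the missing high bytes as zero.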
+func (b byteReader) Uint32() uint32 {
+	if r := b.remain(); r < 4 {
+		// Very rare
+		v := uint32(0)
+		for i := 1; i <= r; i++ {
+			v = (v << 8) | uint32(b.b[len(b.b)-i])
+		}
+		return v
+	}
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32NC returns a little endian uint32 starting at current offset.
+// The caller must ensure that there are at least 4 bytes left.
+func (b byteReader) Uint32NC() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 0000000..75bf05b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,546 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+	o decoderOptions
+
+	// Unreferenced decoders, ready for use.
+	decoders chan *blockDec
+
+	// Streams ready to be decoded.
+	stream chan decodeStream
+
+	// Current read position used for Reader functionality.
+	current decoderState
+
+	// Custom dictionaries.
+	// Always uses copies.
+	dicts map[uint32]dict
+
+	// streamWg is the waitgroup for all streams
+	streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+	// current block being written to stream.
+	decodeOutput
+
+	// output in order to be written to stream.
+	output chan decodeOutput
+
+	// cancel remaining output.
+	cancel chan struct{}
+
+	flushed bool
+}
+
+var (
+	// Check the interfaces we want to support.
+	_ = io.WriterTo(&Decoder{})
+	_ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
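+//
+// A minimal usage sketch (the reader/writer names are illustrative only):
+//
+//	dec, err := NewReader(input)
+//	if err != nil {
+//		return err
+//	}
+//	defer dec.Close()
+//	// Streaming mode:
+//	_, err = io.Copy(output, dec)
+//	// Stateless mode, safe for concurrent use:
+//	blob, err := dec.DecodeAll(compressed, nil)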
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.flushed = true
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes read into p and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	if d.stream == nil {
+		return 0, errors.New("no input has been initialized")
+	}
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debug {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debug {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+	if r == nil {
+		return errors.New("nil Reader sent as input")
+	}
+
+	if d.stream == nil {
+		d.stream = make(chan decodeStream, 1)
+		d.streamWg.Add(1)
+		go d.startStreamDecoder(d.stream)
+	}
+
+	d.drainOutput()
+
+	// If the input is a *bytes.Buffer smaller than 1MB, decode synchronously instead.
+	if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
+		if debug {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb.Bytes()
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
+		if err == nil {
+			err = io.EOF
+		}
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debug {
+			println("sync decode to ", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+
+	// Remove current block.
+	d.current.decodeOutput = decodeOutput{}
+	d.current.err = nil
+	d.current.cancel = make(chan struct{})
+	d.current.flushed = false
+	d.current.d = nil
+
+	d.stream <- decodeStream{
+		r:      r,
+		output: d.current.output,
+		cancel: d.current.cancel,
+	}
+	return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
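+// Any in-flight block decoders received while draining are returned to the
+// decoder pool.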
+func (d *Decoder) drainOutput() {
+	if d.current.cancel != nil {
+		if debug {
+			println("cancelling current")
+		}
+		close(d.current.cancel)
+		d.current.cancel = nil
+	}
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+		d.current.b = nil
+	}
+	if d.current.output == nil || d.current.flushed {
+		if debug {
+			println("current already flushed")
+		}
+		return
+	}
+	for {
+		v := <-d.current.output
+		if v.d != nil {
+			if debug {
+				printf("re-adding decoder %p", v.d)
+			}
+			d.decoders <- v.d
+		}
+		if v.err == errEndOfStream {
+			if debug {
+				println("current flushed")
+			}
+			d.current.flushed = true
+			return
+		}
+	}
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+	if d.stream == nil {
+		return 0, errors.New("no input has been initialized")
+	}
+	var n int64
+	for {
+		if len(d.current.b) > 0 {
+			n2, err2 := w.Write(d.current.b)
+			n += int64(n2)
+			if err2 != nil && d.current.err == nil {
+				d.current.err = err2
+				break
+			}
+		}
+		if d.current.err != nil {
+			break
+		}
+		d.nextBlock(true)
+	}
+	err := d.current.err
+	if err != nil {
+		d.drainOutput()
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	if d.current.err == ErrDecoderClosed {
+		return dst, ErrDecoderClosed
+	}
+
+	// Grab a block decoder and frame decoder.
+	block := <-d.decoders
+	frame := block.localFrame
+	defer func() {
+		if debug {
+			printf("re-adding decoder: %p", block)
+		}
+		frame.rawInput = nil
+		frame.bBuf = nil
+		d.decoders <- block
+	}()
+	frame.bBuf = input
+
+	for {
+		frame.history.reset()
+		err := frame.reset(&frame.bBuf)
+		if err == io.EOF {
+			return dst, nil
+		}
+		if frame.DictionaryID != nil {
+			dict, ok := d.dicts[*frame.DictionaryID]
+			if !ok {
+				return nil, ErrUnknownDictionary
+			}
+			frame.history.setDict(&dict)
+		}
+		if err != nil {
+			return dst, err
+		}
+		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
+			if uint64(cap(dst)) < frame.FrameContentSize {
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				copy(dst2, dst)
+				dst = dst2
+			}
+		}
+		if cap(dst) == 0 {
+			// Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
+			size := frame.WindowSize * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			dst = make([]byte, 0, size)
+		}
+
+		dst, err = frame.runDecoder(dst, block)
+		if err != nil {
+			return dst, err
+		}
+		if len(frame.bBuf) == 0 {
+			break
+		}
+	}
+	return dst, nil
+}
+
+// nextBlock returns the next block.
+// If an error occurs d.current.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
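+// nextBlock is the consumer side of the stream pipeline and is used by both
+// Read and WriteTo above.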
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p", d.current.d)
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+	}
+	if d.current.err != nil {
+		// Keep error state.
+		return blocking
+	}
+
+	if blocking {
+		d.current.decodeOutput = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if debug {
+		println("got", len(d.current.b), "bytes, error:", d.current.err)
+	}
+	return true
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+	if d.current.err == ErrDecoderClosed {
+		return
+	}
+	d.drainOutput()
+	if d.stream != nil {
+		close(d.stream)
+		d.streamWg.Wait()
+		d.stream = nil
+	}
+	if d.decoders != nil {
+		close(d.decoders)
+		for dec := range d.decoders {
+			dec.Close()
+		}
+		d.decoders = nil
+	}
+	if d.current.d != nil {
+		d.current.d.Close()
+		d.current.d = nil
+	}
+	d.current.err = ErrDecoderClosed
+}
+
+// RegisterDict will load a dictionary and make it available to the decoder.
+func (d *Decoder) RegisterDict(b []byte) error {
+	dc, err := loadDict(b)
+	if err != nil {
+		return err
+	}
+	if d.dicts == nil {
+		d.dicts = make(map[uint32]dict, 1)
+	}
+	d.dicts[dc.id] = *dc
+	return nil
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+	return closeWrapper{d: d}
+}
+
+// closeWrapper wraps the Decoder so it satisfies io.ReadCloser.
+type closeWrapper struct {
+	d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+	return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+	return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+	c.d.Close()
+	return nil
+}
+
+type decodeOutput struct {
+	d   *blockDec
+	b   []byte
+	err error
+}
+
+type decodeStream struct {
+	r io.Reader
+
+	// Blocks ready to be written to output.
+	output chan decodeOutput
+
+	// cancel reading from the input
+	cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// Stream decoding works as follows:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create a goroutine that handles stream processing; it sends history to the decoders as they become available.
+// Decoders update the history as they decode.
+// When a block is returned:
+// a) history is sent to the next decoder,
+// b) content is written to the CRC,
+// c) data is returned to the writer,
+// d) we wait for the next block to return data.
+// Once written, the block decoders are sent back to the pool for re-use.
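+// startStreamDecoder runs the loop described above for one decodeStream at a
+// time, until the input channel is closed by Close.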
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+	defer d.streamWg.Done()
+	frame := newFrameDec(d.o)
+	for stream := range inStream {
+		if debug {
+			println("got new stream")
+		}
+		br := readerWrapper{r: stream.r}
+	decodeStream:
+		for {
+			frame.history.reset()
+			err := frame.reset(&br)
+			if debug && err != nil {
+				println("Frame decoder returned", err)
+			}
+			if err == nil && frame.DictionaryID != nil {
+				dict, ok := d.dicts[*frame.DictionaryID]
+				if !ok {
+					err = ErrUnknownDictionary
+				} else {
+					frame.history.setDict(&dict)
+				}
+			}
+			if err != nil {
+				stream.output <- decodeOutput{
+					err: err,
+				}
+				break
+			}
+			if debug {
+				println("starting frame decoder")
+			}
+
+			// This goroutine will forward history between frames.
+			frame.frameDone.Add(1)
+			frame.initAsync()
+
+			go frame.startDecoder(stream.output)
+		decodeFrame:
+			// Go through all blocks of the frame.
+			for {
+				dec := <-d.decoders
+				select {
+				case <-stream.cancel:
+					if !frame.sendErr(dec, io.EOF) {
+						// To not let the decoder dangle, send it back.
+						stream.output <- decodeOutput{d: dec}
+					}
+					break decodeStream
+				default:
+				}
+				err := frame.next(dec)
+				switch err {
+				case io.EOF:
+					// End of current frame, no error
+					if debug {
+						println("EOF on next block")
+					}
+					break decodeFrame
+				case nil:
+					continue
+				default:
+					if debug {
+						println("block decoder returned", err)
+					}
+					break decodeStream
+				}
+			}
+			// All blocks have started decoding, check if there are more frames.
+			if debug {
+				println("waiting for done")
+			}
+			frame.frameDone.Wait()
+			if debug {
+				println("done waiting...")
+			}
+		}
+		frame.frameDone.Wait()
+		if debug {
+			println("Sending EOS")
+		}
+		stream.output <- decodeOutput{err: errEndOfStream}
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
new file mode 100644
index 0000000..2ac9cd2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -0,0 +1,68 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+)
+
+// DOption is an option for creating a decoder.
+type DOption func(*decoderOptions) error
+
+// decoderOptions retains the accumulated state of multiple options.
+type decoderOptions struct {
+	lowMem         bool
+	concurrent     int
+	maxDecodedSize uint64
+}
+
+func (o *decoderOptions) setDefault() {
+	*o = decoderOptions{
+		// use less ram: true for now, but may change.
+		lowMem:     true,
+		concurrent: runtime.GOMAXPROCS(0),
+	}
+	o.maxDecodedSize = 1 << 63
+}
+
+// WithDecoderLowmem will set whether to use a lower amount of memory,
+// but possibly have to allocate more while running.
+func WithDecoderLowmem(b bool) DOption {
+	return func(o *decoderOptions) error { o.lowMem = b; return nil }
+}
+
+// WithDecoderConcurrency will set the concurrency,
+// meaning the maximum number of decoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return fmt.Errorf("WithDecoderMaxMemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 0000000..8eb6f6b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,104 @@
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+	id uint32
+
+	litDec              *huff0.Scratch
+	llDec, ofDec, mlDec sequenceDec
+	offsets             [3]int
+	content             []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// loadDict loads a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+	// Check static field size.
+	if len(b) <= 8+(3*4) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	d := dict{
+		llDec: sequenceDec{fse: &fseDecoder{}},
+		ofDec: sequenceDec{fse: &fseDecoder{}},
+		mlDec: sequenceDec{fse: &fseDecoder{}},
+	}
+	if !bytes.Equal(b[:4], dictMagic[:]) {
+		return nil, ErrMagicMismatch
+	}
+	d.id = binary.LittleEndian.Uint32(b[4:8])
+	if d.id == 0 {
+		return nil, errors.New("dictionaries cannot have ID 0")
+	}
+
+	// Read literal table
+	var err error
+	d.litDec, b, err = huff0.ReadTable(b[8:], nil)
+	if err != nil {
+		return nil, err
+	}
+
+	br := byteReader{
+		b:   b,
+		off: 0,
+	}
+	readDec := func(i tableIndex, dec *fseDecoder) error {
+		if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+			return err
+		}
+		if br.overread() {
+			return io.ErrUnexpectedEOF
+		}
+		err = dec.transform(symbolTableX[i])
+		if err != nil {
+			if debug {
+				println("Transform table error:", err)
+			}
+			return err
+		}
+		if debug {
+			println("Read table ok", "symbolLen:", dec.symbolLen)
+		}
+		// Set decoders as predefined so they aren't reused.
+		dec.preDefined = true
+		return nil
+	}
+
+	if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+		return nil, err
+	}
+	if br.remain() < 12 {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	d.offsets[0] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[1] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[2] = int(br.Uint32())
+	br.advance(4)
+	if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+		return nil, errors.New("invalid offset in dictionary")
+	}
+	d.content = make([]byte, br.remain())
+	copy(d.content, br.unread())
+	if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+		return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+	}
+
+	return &d, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 0000000..c120d90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,518 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. 
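+			// The step grows by one byte for every 2^(kSearchStrength-1)
+			// bytes scanned since the last emit, so incompressible data
+			// is skipped over progressively faster.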
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.Encode(blk, src) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 0000000..50276bc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,678 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
+ l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. 
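+	// All table entries store offsets relative to e.cur; resetting before
+	// e.cur nears the int32 limit keeps stored offsets representable.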
+ if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. 
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugAsserts && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debugAsserts && s <= t {
+			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hash5(cv0, dFastShortTableBits)] = te0
+		e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+		cv = load6432(src, s)
+
+		if len(blk.sequences) <= 2 {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			// Hash the current value, matching the Encode variant above;
+			// the entry stored below carries val: uint32(cv).
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+
+	// We do not store history, so we must offset e.cur to avoid false matches for next user.
+	if e.cur < bufferReset {
+		e.cur += int32(len(src))
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 0000000..4104b45
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,755 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+	tableBits      = 15             // Bits used in the table
+	tableSize      = 1 << tableBits // Size of the table
+	tableMask      = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	maxMatchLength = 131074
+)
+
+type tableEntry struct {
+	val    uint32
+	offset int32
+}
+
+type fastBase struct {
+	// cur is the offset at the start of hist
+	cur int32
+	// maximum offset. Should be at least 2x block size.
+	maxMatchOff int32
+	hist        []byte
+	crc         *xxhash.Digest
+	tmp         [8]byte
+	blk         *blockEnc
+}
+
+type fastEncoder struct {
+	fastBase
+	table [tableSize]tableEntry
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+	return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+	crc := e.crc.Sum(e.tmp[:0])
+	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+	return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int) int32 {
+	if size > 0 && size < int(e.maxMatchOff) {
+		b := int32(1) << uint(bits.Len(uint(size)))
+		// Keep minimum window.
+		if b < 1024 {
+			b = 1024
+		}
+		return b
+	}
+	return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
+ seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debug { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + // length := 4 + e.matchlen(s+6, repIndex+4, src) + // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + var length int32 + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. 
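+			// (The jump past the match below would otherwise leave this
+			// position unindexed in the table.)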
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+	// We do not store history, so we must offset e.cur to avoid false matches for next user.
+	if e.cur < bufferReset {
+		e.cur += int32(len(src))
+	}
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > bufferReset {
+		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	}
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			l := e.maxMatchOff * 2
+			// Make it at least 1MB.
+			if l < 1<<20 {
+				l = 1 << 20
+			}
+			e.hist = make([]byte, 0, l)
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff*2) {
+				panic("unexpected buffer size")
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("t (%d) < 0", t)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) Reset(singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	if !singleBlock && cap(e.hist) < int(e.maxMatchOff*2) {
+		l := e.maxMatchOff * 2
+		// Make it at least 1MB.
+		if l < 1<<20 {
+			l = 1 << 20
+		}
+		e.hist = make([]byte, 0, l)
+	}
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
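+	// Advancing e.cur by maxMatchOff+len(hist) makes every stale table entry
+	// appear further back than maxMatchOff, so it can never match again.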
+ if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go new file mode 100644 index 0000000..d874116 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_params.go @@ -0,0 +1,157 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +/* +// encParams are not really used, just here for reference. +type encParams struct { + // largest match distance : larger == more compression, more memory needed during decompression + windowLog uint8 + + // fully searched segment : larger == more compression, slower, more memory (useless for fast) + chainLog uint8 + + // dispatch table : larger == faster, more memory + hashLog uint8 + + // < nb of searches : larger == more compression, slower + searchLog uint8 + + // < match length searched : larger == faster decompression, sometimes less compression + minMatch uint8 + + // acceptable match size for optimal parser (only) : larger == more compression, slower + targetLength uint32 + + // see ZSTD_strategy definition above + strategy strategy +} + +// strategy defines the algorithm to use when generating sequences. +type strategy uint8 + +const ( + // Compression strategies, listed from fastest to strongest + strategyFast strategy = iota + 1 + strategyDfast + strategyGreedy + strategyLazy + strategyLazy2 + strategyBtlazy2 + strategyBtopt + strategyBtultra + strategyBtultra2 + // note : new strategies _might_ be added in the future. + // Only the order (from fast to strong) is guaranteed + +) + +var defEncParams = [4][]encParams{ + { // "default" - for any srcSize > 256 KB + // W, C, H, S, L, TL, strat + {19, 12, 13, 1, 6, 1, strategyFast}, // base for negative levels + {19, 13, 14, 1, 7, 0, strategyFast}, // level 1 + {20, 15, 16, 1, 6, 0, strategyFast}, // level 2 + {21, 16, 17, 1, 5, 1, strategyDfast}, // level 3 + {21, 18, 18, 1, 5, 1, strategyDfast}, // level 4 + {21, 18, 19, 2, 5, 2, strategyGreedy}, // level 5 + {21, 19, 19, 3, 5, 4, strategyGreedy}, // level 6 + {21, 19, 19, 3, 5, 8, strategyLazy}, // level 7 + {21, 19, 19, 3, 5, 16, strategyLazy2}, // level 8 + {21, 19, 20, 4, 5, 16, strategyLazy2}, // level 9 + {22, 20, 21, 4, 5, 16, strategyLazy2}, // level 10 + {22, 21, 22, 4, 5, 16, strategyLazy2}, // level 11 + {22, 21, 22, 5, 5, 16, strategyLazy2}, // level 12 + {22, 21, 22, 5, 5, 32, strategyBtlazy2}, // level 13 + {22, 22, 23, 5, 5, 32, strategyBtlazy2}, // level 14 + {22, 23, 23, 6, 5, 32, strategyBtlazy2}, // level 15 + {22, 22, 22, 5, 5, 48, strategyBtopt}, // level 16 + {23, 23, 22, 5, 4, 64, strategyBtopt}, // level 17 + {23, 23, 22, 6, 3, 64, strategyBtultra}, // level 18 + {23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19 + {25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20 + {26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21 + {27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22 + }, + { // for srcSize <= 256 KB + // W, C, H, S, L, T, strat + {18, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels + {18, 13, 14, 1, 6, 0, strategyFast}, // level 1 + {18, 14, 14, 1, 5, 1, strategyDfast}, // level 2 + {18, 16, 16, 1, 4, 1, strategyDfast}, // level 3 + {18, 16, 17, 2, 5, 2, strategyGreedy}, // level 4. + {18, 18, 18, 3, 5, 2, strategyGreedy}, // level 5. 
+ {18, 18, 19, 3, 5, 4, strategyLazy}, // level 6. + {18, 18, 19, 4, 4, 4, strategyLazy}, // level 7 + {18, 18, 19, 4, 4, 8, strategyLazy2}, // level 8 + {18, 18, 19, 5, 4, 8, strategyLazy2}, // level 9 + {18, 18, 19, 6, 4, 8, strategyLazy2}, // level 10 + {18, 18, 19, 5, 4, 12, strategyBtlazy2}, // level 11. + {18, 19, 19, 7, 4, 12, strategyBtlazy2}, // level 12. + {18, 18, 19, 4, 4, 16, strategyBtopt}, // level 13 + {18, 18, 19, 4, 3, 32, strategyBtopt}, // level 14. + {18, 18, 19, 6, 3, 128, strategyBtopt}, // level 15. + {18, 19, 19, 6, 3, 128, strategyBtultra}, // level 16. + {18, 19, 19, 8, 3, 256, strategyBtultra}, // level 17. + {18, 19, 19, 6, 3, 128, strategyBtultra2}, // level 18. + {18, 19, 19, 8, 3, 256, strategyBtultra2}, // level 19. + {18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20. + {18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21. + {18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22. + }, + { // for srcSize <= 128 KB + // W, C, H, S, L, T, strat + {17, 12, 12, 1, 5, 1, strategyFast}, // base for negative levels + {17, 12, 13, 1, 6, 0, strategyFast}, // level 1 + {17, 13, 15, 1, 5, 0, strategyFast}, // level 2 + {17, 15, 16, 2, 5, 1, strategyDfast}, // level 3 + {17, 17, 17, 2, 4, 1, strategyDfast}, // level 4 + {17, 16, 17, 3, 4, 2, strategyGreedy}, // level 5 + {17, 17, 17, 3, 4, 4, strategyLazy}, // level 6 + {17, 17, 17, 3, 4, 8, strategyLazy2}, // level 7 + {17, 17, 17, 4, 4, 8, strategyLazy2}, // level 8 + {17, 17, 17, 5, 4, 8, strategyLazy2}, // level 9 + {17, 17, 17, 6, 4, 8, strategyLazy2}, // level 10 + {17, 17, 17, 5, 4, 8, strategyBtlazy2}, // level 11 + {17, 18, 17, 7, 4, 12, strategyBtlazy2}, // level 12 + {17, 18, 17, 3, 4, 12, strategyBtopt}, // level 13. + {17, 18, 17, 4, 3, 32, strategyBtopt}, // level 14. + {17, 18, 17, 6, 3, 256, strategyBtopt}, // level 15. + {17, 18, 17, 6, 3, 128, strategyBtultra}, // level 16. + {17, 18, 17, 8, 3, 256, strategyBtultra}, // level 17. + {17, 18, 17, 10, 3, 512, strategyBtultra}, // level 18. + {17, 18, 17, 5, 3, 256, strategyBtultra2}, // level 19. + {17, 18, 17, 7, 3, 512, strategyBtultra2}, // level 20. + {17, 18, 17, 9, 3, 512, strategyBtultra2}, // level 21. + {17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22. + }, + { // for srcSize <= 16 KB + // W, C, H, S, L, T, strat + {14, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels + {14, 14, 15, 1, 5, 0, strategyFast}, // level 1 + {14, 14, 15, 1, 4, 0, strategyFast}, // level 2 + {14, 14, 15, 2, 4, 1, strategyDfast}, // level 3 + {14, 14, 14, 4, 4, 2, strategyGreedy}, // level 4 + {14, 14, 14, 3, 4, 4, strategyLazy}, // level 5. + {14, 14, 14, 4, 4, 8, strategyLazy2}, // level 6 + {14, 14, 14, 6, 4, 8, strategyLazy2}, // level 7 + {14, 14, 14, 8, 4, 8, strategyLazy2}, // level 8. + {14, 15, 14, 5, 4, 8, strategyBtlazy2}, // level 9. + {14, 15, 14, 9, 4, 8, strategyBtlazy2}, // level 10. + {14, 15, 14, 3, 4, 12, strategyBtopt}, // level 11. + {14, 15, 14, 4, 3, 24, strategyBtopt}, // level 12. + {14, 15, 14, 5, 3, 32, strategyBtultra}, // level 13. + {14, 15, 15, 6, 3, 64, strategyBtultra}, // level 14. + {14, 15, 15, 7, 3, 256, strategyBtultra}, // level 15. + {14, 15, 15, 5, 3, 48, strategyBtultra2}, // level 16. + {14, 15, 15, 6, 3, 128, strategyBtultra2}, // level 17. + {14, 15, 15, 7, 3, 256, strategyBtultra2}, // level 18. + {14, 15, 15, 8, 3, 256, strategyBtultra2}, // level 19. + {14, 15, 15, 8, 3, 512, strategyBtultra2}, // level 20. + {14, 15, 15, 9, 3, 512, strategyBtultra2}, // level 21. 
+ {14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22. + }, +} +*/ diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 0000000..bf42bb1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. +type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int) int32 + UseBlock(*blockEnc) + Reset(singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + // If not single block, history will be allocated on first use. + enc.Reset(true) + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.writeErr = nil +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. 
+// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: 0, + WindowSize: uint32(s.encoder.WindowSize(0)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: 0, + } + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.wg.Add(1) + go func(src []byte) { + if debug { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
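+				// A full-size block where every input byte ended up as a
+				// literal is assumed incompressible and falls through to the
+				// raw path below.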
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(e.o.noEntropy) + } + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debug { + println("Using ReadFrom") + } + // Maybe handle stuff queued? + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debug { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, e.nextBlock(true) + default: + if debug { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + case nil: + } + if len(src) > 0 { + if debug { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. 
+// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + enc.Reset(true) + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. + single := len(src) < 1<<20 && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(len(src))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: 0, + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + enc.EncodeNoHist(blk, src) + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(e.o.noEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.reset(nil) + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(e.o.noEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) 
+ default: + panic(err) + } + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 0000000..3fc0309 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,249 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + customWindow bool +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + // use less ram: true for now, but may change. + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: 1 << 16, + windowSize: 8 << 20, + level: SpeedDefault, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedDefault: + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}} + case SpeedBetterCompression: + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + case SpeedFastest: + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. 
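+// For example, WithEncoderPadding(4096) rounds every produced stream up to a
+// 4 KiB boundary.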
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			o.pad = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+		}
+		o.pad = n
+		return nil
+	}
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// Be aware that CPU usage for this level may go up in future versions.
+	SpeedBetterCompression
+
+	// speedLast should be kept as the last actual compression option.
+	// This is not for external usage, but is used to keep track of the valid options.
+	speedLast
+
+	// SpeedBestCompression will choose the best available compression option.
+	// For now this is not implemented.
+	SpeedBestCompression = SpeedBetterCompression
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return the encoder level that most closely matches
+// the compression ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3 && level < 6:
+		return SpeedDefault
+	case level > 5:
+		return SpeedBetterCompression
+	}
+	return SpeedDefault
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	case SpeedBetterCompression:
+		return "better"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			}
+		}
+		return nil
+	}
+}
+
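+// The options in this file compose when constructing an Encoder. A minimal
+// sketch (illustrative only; "output" and "input" stand for any io.Writer
+// and io.Reader):
+//
+//	enc, err := NewWriter(output,
+//		WithEncoderLevel(SpeedBetterCompression),
+//		WithEncoderCRC(true),
+//		WithEncoderPadding(4096))
+//	if err != nil {
+//		return err
+//	}
+//	if _, err = io.Copy(enc, input); err != nil {
+//		return err
+//	}
+//	return enc.Close() // flushes the final block and writes the CRC
+
+// WithZeroFrames will encode 0 length input as full frames.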
+// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 0000000..fc4a566 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,494 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc hash.Hash64 + offset int64 + + WindowSize uint64 + + // maxWindowSize is the maximum windows size to support. + // should never be bigger than max-int. + maxWindowSize uint64 + + // In order queue of blocks being decoded. + decoding chan *blockDec + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + frameDone sync.WaitGroup + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool + + // asyncRunning indicates whether the async routine processes input on 'decoding'. + asyncRunningMu sync.Mutex + asyncRunning bool +} + +const ( + // The minimum Window_Size is 1 KB. + MinWindowSize = 1 << 10 + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + d := frameDec{ + o: o, + maxWindowSize: MaxWindowSize, + } + if d.maxWindowSize > o.maxDecodedSize { + d.maxWindowSize = o.maxDecodedSize + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. 
+// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var b []byte + for { + b = br.readSmall(4) + if b == nil { + return io.EOF + } + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if debug { + println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. + break + } + // Read size to skip + b = br.readSmall(4) + if b == nil { + println("Reading Frame Size EOF") + return io.ErrUnexpectedEOF + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err := br.skipN(int(n)) + if err != nil { + if debug { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(b, frameMagic) { + println("Got magic numbers: ", b, "want:", frameMagic) + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + println("Reading Frame_Header_Descriptor", err) + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + println("Reading Window_Descriptor", err) + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + b = br.readSmall(int(size)) + if b == nil { + if debug { + println("Reading Dictionary_ID", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debug { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b := br.readSmall(fcsSize) + if b == nil { + println("Reading Frame content", io.ErrUnexpectedEOF) + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
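+			// e.g. bytes {0x00, 0x00} decode to 256, the smallest value
+			// representable with the 2-byte field.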
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debug { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + } + + if d.WindowSize > d.maxWindowSize { + printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + return ErrWindowSizeExceeded + } + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + println("got window size: ", d.WindowSize) + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if d.o.lowMem && d.history.windowSize < maxBlockSize { + d.history.maxSize = d.history.windowSize * 2 + } else { + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debug { + printf("decoding new block %p:%p", block, block.data) + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + d.sendErr(block, err) + return err + } + block.input <- struct{}{} + if debug { + println("next block:", block) + } + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return nil + } + if block.Last { + // We indicate the frame is done by sending io.EOF + d.decoding <- block + return io.EOF + } + d.decoding <- block + return nil +} + +// sendEOF will queue an error block on the frame. +// This will cause the frame decoder to return when it encounters the block. +// Returns true if the decoder was added. +func (d *frameDec) sendErr(block *blockDec, err error) bool { + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return false + } + + println("sending error", err.Error()) + block.sendErr(err) + d.decoding <- block + return true +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. 
+ tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + // We can overwrite upper tmp now + want := d.rawInput.readSmall(4) + if want == nil { + println("CRC missing?") + return io.ErrUnexpectedEOF + } + + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debug { + println("CRC ok", tmp[:]) + } + return nil +} + +func (d *frameDec) initAsync() { + if !d.o.lowMem && !d.SingleSegment { + // set max extra size history to 10MB. + d.history.maxSize = d.history.windowSize + maxBlockSize*5 + } + // re-alloc if more than one extra block size. + if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.history.b) < d.history.maxSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.decoding) < d.o.concurrent { + d.decoding = make(chan *blockDec, d.o.concurrent) + } + if debug { + h := d.history + printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) + } + d.asyncRunningMu.Lock() + d.asyncRunning = true + d.asyncRunningMu.Unlock() +} + +// startDecoder will start decoding blocks and write them to the writer. +// The decoder will stop as soon as an error occurs or at end of frame. +// When the frame has finished decoding the *bufio.Reader +// containing the remaining input will be sent on frameDec.frameDone. +func (d *frameDec) startDecoder(output chan decodeOutput) { + written := int64(0) + + defer func() { + d.asyncRunningMu.Lock() + d.asyncRunning = false + d.asyncRunningMu.Unlock() + + // Drain the currently decoding. + d.history.error = true + flushdone: + for { + select { + case b := <-d.decoding: + b.history <- &d.history + output <- <-b.result + default: + break flushdone + } + } + println("frame decoder done, signalling done") + d.frameDone.Done() + }() + // Get decoder for first block. + block := <-d.decoding + block.history <- &d.history + for { + var next *blockDec + // Get result + r := <-block.result + if r.err != nil { + println("Result contained error", r.err) + output <- r + return + } + if debug { + println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) + d.offset += int64(len(r.b)) + } + if !block.Last { + // Send history to next block + select { + case next = <-d.decoding: + if debug { + println("Sending ", len(d.history.b), "bytes as history") + } + next.history <- &d.history + default: + // Wait until we have sent the block, so + // other decoders can potentially get the decoder. + next = nil + } + } + + // Add checksum, async to decoding. + if d.HasCheckSum { + n, err := d.crc.Write(r.b) + if err != nil { + r.err = err + if n != len(r.b) { + r.err = io.ErrShortWrite + } + output <- r + return + } + } + written += int64(len(r.b)) + if d.SingleSegment && uint64(written) > d.FrameContentSize { + println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) + r.err = ErrFrameSizeExceeded + output <- r + return + } + if block.Last { + r.err = d.checkCRC() + output <- r + return + } + output <- r + if next == nil { + // There was no decoder available, we wait for one now that we have sent to the writer. + if debug { + println("Sending ", len(d.history.b), " bytes as history") + } + next = <-d.decoding + next.history <- &d.history + } + block = next + } +} + +// runDecoder will create a sync decoder that will decode a block of data. 
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+	saved := d.history.b
+
+	// We use the history for output to avoid copying it.
+	d.history.b = dst
+	// Store input length, so we only check new data.
+	crcStart := len(dst)
+	var err error
+	for {
+		err = dec.reset(d.rawInput, d.WindowSize)
+		if err != nil {
+			break
+		}
+		if debug {
+			println("next block:", dec)
+		}
+		err = dec.decodeBuf(&d.history)
+		if err != nil || dec.Last {
+			break
+		}
+		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+			err = ErrFrameSizeExceeded
+			break
+		}
+	}
+	dst = d.history.b
+	if err == nil {
+		if d.HasCheckSum {
+			var n int
+			n, err = d.crc.Write(dst[crcStart:])
+			if err == nil {
+				if n != len(dst)-crcStart {
+					err = io.ErrShortWrite
+				} else {
+					err = d.checkCRC()
+				}
+			}
+		}
+	}
+	d.history.b = saved
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 0000000..4479cfe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,115 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"math/bits"
+)
+
+type frameHeader struct {
+	ContentSize   uint64
+	WindowSize    uint32
+	SingleSegment bool
+	Checksum      bool
+	DictID        uint32 // Not stored.
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+	dst = append(dst, frameMagic...)
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// becomes divisible by wantMultiple.
+// The returned value will be 0, or at least skippableFrameHeader bytes.
+// The function will panic if written < 0 or wantMultiple <= 0.
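+// For example, written=1000 and wantMultiple=512 gives leftOver=488 and
+// toAdd=24; 24 already covers the 8 byte skippable frame header, and
+// 1000+24=1024 is a multiple of 512.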
+func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 0000000..e6d3d49 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,385 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. 
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+	var (
+		charnum   uint16
+		previous0 bool
+	)
+	if b.remain() < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32NC()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		println("Invalid tablelog:", nbBits)
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 && charnum <= maxSymbol {
+		if previous0 {
+			//println("prev0")
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				//println("24 x 0")
+				n0 += 24
+				if r := b.remain(); r > 5 {
+					b.advance(2)
+					// The check above should make sure we can read 32 bits
+					bitStream = b.Uint32NC() >> bitCount
+				} else {
+					// end of bit stream
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+			for charnum < n0 {
+				s.norm[uint8(charnum)] = 0
+				charnum++
+			}
+
+			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				// The check above should make sure we can read 32 bits
+				bitStream = b.Uint32NC() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*threshold - 1) - remaining
+		var count int32
+
+		if int32(bitStream)&(threshold-1) < max {
+			count = int32(bitStream) & (threshold - 1)
+			if debugAsserts && nbBits < 1 {
+				panic("nbBits underflow")
+			}
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		// extra accuracy
+		count--
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	// println(s.norm[:s.symbolLen], s.symbolLen)
+	return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
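+// Bit layout: 0-7 nbBits, 8-15 addBits, 16-31 newState, 32-63 baseline.
+// For example, newDecSymbol(5, 3, 0x1234, 100) packs to 0x0000006412340305.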
+type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baseline() uint32 { + return uint32(d >> 32) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { + *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setBaseline(baseline uint32) { + const mask = 0xffffffff + *d = (*d & mask) | decSymbol(baseline)<<32 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. +func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. 
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// next returns the current symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) next(br *bitReader) { + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (s *fseState) finished(br *bitReader) bool { + return br.finished() && s.state.nbBits() > 0 +} + +// final returns the current state symbol without decoding the next. +func (s *fseState) final() (int, uint8) { + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 0000000..aa9eba8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. 
+ rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *fseEncoder) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *fseEncoder) prepare() (*fseEncoder, error) { + if s == nil { + s = &fseEncoder{} + } + s.useRLE = false + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + return s, nil +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < int(tableSize) { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
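+// It fills all three parts of cTable: the spread tableSymbol slice, the stateTable of next states, and the per-symbol symbolTT transforms.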
+func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int16(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int16(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debug { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. 
+func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. +func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. 
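+// It pins low-count symbols to a probability of 1 first, then spreads the remaining table slots over the other symbols in proportion to their counts.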
+func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. 
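+// The absolute values of s.norm[:s.symbolLen] must sum to exactly 1<<actualTableLog; a -1 entry occupies a single table slot.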
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
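+// Each encode step writes the low bits of the current state and advances to the next state via stateTable; flush emits the final state so the decoder can start there.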
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 0000000..6c17dc1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
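+// For example, fillBase(dst[:2], 16, 1, 1) fills {baseLine: 16, addBits: 1} and {baseLine: 18, addBits: 1}, since each entry advances base by 1<<bit.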
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 0000000..4a75206 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,77 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. +// l must be >=4 and <=8. Any other value will return hash for 4 bytes. +// h should always be <32. +// Preferably h and l should be a constant. +// FIXME: This does NOT get resolved, if 'mls' is constant, +// so this cannot be used. +func hashLen(u uint64, hashLog, mls uint8) uint32 { + switch mls { + case 5: + return hash5(u, hashLog) + case 6: + return hash6(u, hashLog) + case 7: + return hash7(u, hashLog) + case 8: + return hash8(u, hashLog) + default: + return hash4x64(u, hashLog) + } +} + +// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash3(u uint32, h uint8) uint32 { + return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> ((32 - h) & 31) +} + +// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4x64(u uint64, h uint8) uint32 { + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 0000000..f418f50 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,89 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + b []byte + huffTree *huff0.Scratch + recentOffsets [3]int + decoders sequenceDecs + windowSize int + maxSize int + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. 
+// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + h.decoders = sequenceDecs{} + if h.huffTree != nil { + if h.dict == nil || h.dict.litDec != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } + } + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litDec +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 0000000..24b5306 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 0000000..426b9ca --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,238 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. 
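+// Calling Sum64 does not alter the digest state, so Write may continue afterwards.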
+func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 0000000..35318d7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(*Digest, []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 0000000..2c9c535 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. 
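+	// arg1 is the byte slice b; the *Digest pointer is at arg+0(FP) and is loaded into AX below.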
+ MOVQ arg1_base+8(FP), CX + MOVQ arg1_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ arg+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ arg1_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 0000000..4a5a821 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 0000000..6f3b0cb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 0000000..7ff8704 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,485 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type seq struct {
+	litLen   uint32
+	matchLen uint32
+	offset   uint32
+
+	// Codes are stored here for the encoder
+	// so they only have to be looked up once.
+	llCode, mlCode, ofCode uint8
+}
+
+func (s seq) String() string {
+	if s.offset <= 3 {
+		if s.offset == 0 {
+			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+		}
+		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+	}
+	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+	compModePredefined seqCompMode = iota
+	compModeRLE
+	compModeFSE
+	compModeRepeat
+)
+
+type sequenceDec struct {
+	// decoder keeps track of the current state and updates it from the bitstream.
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	hist         []byte
+	dict         []byte
+	literals     []byte
+	out          []byte
+	windowSize   int
+	maxBits      uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+	if err := s.litLengths.init(br); err != nil {
+		return errors.New("litLengths:" + err.Error())
+	}
+	if err := s.offsets.init(br); err != nil {
+		return errors.New("offsets:" + err.Error())
+	}
+	if err := s.matchLengths.init(br); err != nil {
+		return errors.New("matchLengths:" + err.Error())
+	}
+	s.literals = literals
+	s.hist = hist.b
+	s.prevOffset = hist.recentOffsets
+	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+	s.windowSize = hist.windowSize
+	s.out = out
+	s.dict = nil
+	if hist.dict != nil {
+		s.dict = hist.dict.content
+	}
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(s.out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", size)
+		}
+		if size > cap(s.out) {
+			// Not enough size, will be extremely rarely triggered,
+			// but could be if destination slice is too small for sync operations.
+			// We add maxBlockSize to the capacity.
+ s.out = append(s.out, make([]byte, maxBlockSize)...) + s.out = s.out[:len(s.out)-maxBlockSize] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + s.out = append(s.out, s.literals[:ll]...) + s.literals = s.literals[ll:] + out := s.out + + if mo > len(s.out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + mo -= len(s.dict) - dictO + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster. 
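+	// The bits consumed by all three states are read with one getBitsFast call below; the offset bits are lowest, then match length, then literal length.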
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. +func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { + for i := uint(0); i < 3; i++ { + var sNew, sHist *sequenceDec + switch i { + default: + // same as "case 0": + sNew = &s.litLengths + sHist = &hist.litLengths + case 1: + sNew = &s.offsets + sHist = &hist.offsets + case 2: + sNew = &s.matchLengths + sHist = &hist.matchLengths + } + if sNew.repeat { + if sHist.fse == nil { + return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) + } + continue + } + if sNew.fse == nil { + return nil, fmt.Errorf("sequence stream %d, no fse found", i) + } + if sHist.fse != nil && !sHist.fse.preDefined { + fseDecoderPool.Put(sHist.fse) + } + sHist.fse = sNew.fse + } + return hist, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 0000000..36bcc3c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,115 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + return + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
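+// Lengths up to 63 map directly through llCodeTable; longer lengths are
+// encoded as highBit(length) plus a fixed delta.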
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 0000000..356956b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/snappy" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
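+			// Chunk layout: a 4-byte masked CRC of the uncompressed data,
+			// followed by the Snappy-compressed payload.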
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + println("snappy.Decode:", err) + return written, err + } + err = r.block.encodeLits(false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debug { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debug { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
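+		// The framing format requires decoders to reject reserved
+		// unskippable chunks rather than skip them.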
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 0000000..0807719 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,144 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. 
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") +) + +func println(a ...interface{}) { + if debug { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. +func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go new file mode 100644 index 0000000..bcf68e6 --- /dev/null +++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package log + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows/svc/eventlog" + + "github.com/sirupsen/logrus" +) + +func init() { + setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error { + if name == "" { + return fmt.Errorf("missing name parameter") + } + + fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err) + l.Errorf("can't connect logger to eventlog: %v", err) + return err + } + l.entry.Logger.Formatter = fmter + return nil + } +} + +type eventlogger struct { + log *eventlog.Log + debugAsInfo bool + wrap logrus.Formatter +} + +func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) { + logHandle, err := eventlog.Open(name) + if err != nil { + return nil, err + } + return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil +} + +func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) { + data, err := s.wrap.Format(e) + if err != nil { + fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err) + return data, err + } + + switch e.Level { + case logrus.PanicLevel: + fallthrough + case logrus.FatalLevel: + fallthrough + case logrus.ErrorLevel: + err = s.log.Error(102, e.Message) + case logrus.WarnLevel: + err = s.log.Warning(101, e.Message) + case logrus.InfoLevel: + err = s.log.Info(100, e.Message) + case logrus.DebugLevel: + if s.debugAsInfo { + err = s.log.Info(100, e.Message) + } + default: + err = s.log.Info(100, e.Message) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err) + } + + return data, err +} diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go new file mode 100644 index 0000000..1088302 --- /dev/null +++ b/vendor/github.com/prometheus/common/log/log.go @@ -0,0 +1,364 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net/url" + "os" + "runtime" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "gopkg.in/alecthomas/kingpin.v2" +) + +// setSyslogFormatter is nil if the target architecture does not support syslog. +var setSyslogFormatter func(logger, string, string) error + +// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows). 
+var setEventlogFormatter func(logger, string, bool) error + +func setJSONFormatter() { + origLogger.Formatter = &logrus.JSONFormatter{} +} + +type loggerSettings struct { + level string + format string +} + +func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error { + err := baseLogger.SetLevel(s.level) + if err != nil { + return err + } + err = baseLogger.SetFormat(s.format) + return err +} + +// AddFlags adds the flags used by this package to the Kingpin application. +// To use the default Kingpin application, call AddFlags(kingpin.CommandLine) +func AddFlags(a *kingpin.Application) { + s := loggerSettings{} + a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]"). + Default(origLogger.Level.String()). + StringVar(&s.level) + defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"} + a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`). + Default(defaultFormat.String()). + StringVar(&s.format) + a.Action(s.apply) +} + +// Logger is the interface for loggers used in the Prometheus components. +type Logger interface { + Debug(...interface{}) + Debugln(...interface{}) + Debugf(string, ...interface{}) + + Info(...interface{}) + Infoln(...interface{}) + Infof(string, ...interface{}) + + Warn(...interface{}) + Warnln(...interface{}) + Warnf(string, ...interface{}) + + Error(...interface{}) + Errorln(...interface{}) + Errorf(string, ...interface{}) + + Fatal(...interface{}) + Fatalln(...interface{}) + Fatalf(string, ...interface{}) + + With(key string, value interface{}) Logger + + SetFormat(string) error + SetLevel(string) error +} + +type logger struct { + entry *logrus.Entry +} + +func (l logger) With(key string, value interface{}) Logger { + return logger{l.entry.WithField(key, value)} +} + +// Debug logs a message at level Debug on the standard logger. +func (l logger) Debug(args ...interface{}) { + l.sourced().Debug(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func (l logger) Debugln(args ...interface{}) { + l.sourced().Debugln(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func (l logger) Debugf(format string, args ...interface{}) { + l.sourced().Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. +func (l logger) Info(args ...interface{}) { + l.sourced().Info(args...) +} + +// Info logs a message at level Info on the standard logger. +func (l logger) Infoln(args ...interface{}) { + l.sourced().Infoln(args...) +} + +// Infof logs a message at level Info on the standard logger. +func (l logger) Infof(format string, args ...interface{}) { + l.sourced().Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. +func (l logger) Warn(args ...interface{}) { + l.sourced().Warn(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func (l logger) Warnln(args ...interface{}) { + l.sourced().Warnln(args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func (l logger) Warnf(format string, args ...interface{}) { + l.sourced().Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +func (l logger) Error(args ...interface{}) { + l.sourced().Error(args...) +} + +// Error logs a message at level Error on the standard logger. +func (l logger) Errorln(args ...interface{}) { + l.sourced().Errorln(args...) 
+} + +// Errorf logs a message at level Error on the standard logger. +func (l logger) Errorf(format string, args ...interface{}) { + l.sourced().Errorf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func (l logger) Fatal(args ...interface{}) { + l.sourced().Fatal(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func (l logger) Fatalln(args ...interface{}) { + l.sourced().Fatalln(args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func (l logger) Fatalf(format string, args ...interface{}) { + l.sourced().Fatalf(format, args...) +} + +func (l logger) SetLevel(level string) error { + lvl, err := logrus.ParseLevel(level) + if err != nil { + return err + } + + l.entry.Logger.Level = lvl + return nil +} + +func (l logger) SetFormat(format string) error { + u, err := url.Parse(format) + if err != nil { + return err + } + if u.Scheme != "logger" { + return fmt.Errorf("invalid scheme %s", u.Scheme) + } + jsonq := u.Query().Get("json") + if jsonq == "true" { + setJSONFormatter() + } + + switch u.Opaque { + case "syslog": + if setSyslogFormatter == nil { + return fmt.Errorf("system does not support syslog") + } + appname := u.Query().Get("appname") + facility := u.Query().Get("local") + return setSyslogFormatter(l, appname, facility) + case "eventlog": + if setEventlogFormatter == nil { + return fmt.Errorf("system does not support eventlog") + } + name := u.Query().Get("name") + debugAsInfo := false + debugAsInfoRaw := u.Query().Get("debugAsInfo") + if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil { + debugAsInfo = parsedDebugAsInfo + } + return setEventlogFormatter(l, name, debugAsInfo) + case "stdout": + l.entry.Logger.Out = os.Stdout + case "stderr": + l.entry.Logger.Out = os.Stderr + default: + return fmt.Errorf("unsupported logger %q", u.Opaque) + } + return nil +} + +// sourced adds a source field to the logger that contains +// the file name and line where the logging happened. +func (l logger) sourced() *logrus.Entry { + _, file, line, ok := runtime.Caller(2) + if !ok { + file = "" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + file = file[slash+1:] + } + return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line)) +} + +var origLogger = logrus.New() +var baseLogger = logger{entry: logrus.NewEntry(origLogger)} + +// Base returns the default Logger logging to +func Base() Logger { + return baseLogger +} + +// NewLogger returns a new Logger logging to out. +func NewLogger(w io.Writer) Logger { + l := logrus.New() + l.Out = w + return logger{entry: logrus.NewEntry(l)} +} + +// NewNopLogger returns a logger that discards all log messages. +func NewNopLogger() Logger { + l := logrus.New() + l.Out = ioutil.Discard + return logger{entry: logrus.NewEntry(l)} +} + +// With adds a field to the logger. +func With(key string, value interface{}) Logger { + return baseLogger.With(key, value) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + baseLogger.sourced().Debug(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + baseLogger.sourced().Debugln(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + baseLogger.sourced().Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. 
+func Info(args ...interface{}) { + baseLogger.sourced().Info(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + baseLogger.sourced().Infoln(args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + baseLogger.sourced().Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + baseLogger.sourced().Warn(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + baseLogger.sourced().Warnln(args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + baseLogger.sourced().Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + baseLogger.sourced().Error(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + baseLogger.sourced().Errorln(args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + baseLogger.sourced().Errorf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + baseLogger.sourced().Fatal(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + baseLogger.sourced().Fatalln(args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + baseLogger.sourced().Fatalf(format, args...) +} + +// AddHook adds hook to Prometheus' original logger. +func AddHook(hook logrus.Hook) { + origLogger.Hooks.Add(hook) +} + +type errorLogWriter struct{} + +func (errorLogWriter) Write(b []byte) (int, error) { + baseLogger.sourced().Error(string(b)) + return len(b), nil +} + +// NewErrorLogger returns a log.Logger that is meant to be used +// in the ErrorLog field of an http.Server to log HTTP server errors. +func NewErrorLogger() *log.Logger { + return log.New(&errorLogWriter{}, "", 0) +} diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go new file mode 100644 index 0000000..f882f2f --- /dev/null +++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go @@ -0,0 +1,126 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!nacl,!plan9 + +package log + +import ( + "fmt" + "log/syslog" + "os" + + "github.com/sirupsen/logrus" +) + +var _ logrus.Formatter = (*syslogger)(nil) + +func init() { + setSyslogFormatter = func(l logger, appname, local string) error { + if appname == "" { + return fmt.Errorf("missing appname parameter") + } + if local == "" { + return fmt.Errorf("missing local parameter") + } + + fmter, err := newSyslogger(appname, local, l.entry.Logger.Formatter) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err) + l.entry.Errorf("can't connect logger to syslog: %v", err) + return err + } + l.entry.Logger.Formatter = fmter + return nil + } +} + +var prefixTag []byte + +type syslogger struct { + wrap logrus.Formatter + out *syslog.Writer +} + +func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) { + priority, err := getFacility(facility) + if err != nil { + return nil, err + } + out, err := syslog.New(priority, appname) + _, isJSON := fmter.(*logrus.JSONFormatter) + if isJSON { + // add cee tag to json formatted syslogs + prefixTag = []byte("@cee:") + } + return &syslogger{ + out: out, + wrap: fmter, + }, err +} + +func getFacility(facility string) (syslog.Priority, error) { + switch facility { + case "0": + return syslog.LOG_LOCAL0, nil + case "1": + return syslog.LOG_LOCAL1, nil + case "2": + return syslog.LOG_LOCAL2, nil + case "3": + return syslog.LOG_LOCAL3, nil + case "4": + return syslog.LOG_LOCAL4, nil + case "5": + return syslog.LOG_LOCAL5, nil + case "6": + return syslog.LOG_LOCAL6, nil + case "7": + return syslog.LOG_LOCAL7, nil + } + return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility) +} + +func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) { + data, err := s.wrap.Format(e) + if err != nil { + fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err) + return data, err + } + // only append tag to data sent to syslog (line), not to what + // is returned + line := string(append(prefixTag, data...)) + + switch e.Level { + case logrus.PanicLevel: + err = s.out.Crit(line) + case logrus.FatalLevel: + err = s.out.Crit(line) + case logrus.ErrorLevel: + err = s.out.Err(line) + case logrus.WarnLevel: + err = s.out.Warning(line) + case logrus.InfoLevel: + err = s.out.Info(line) + case logrus.DebugLevel: + err = s.out.Debug(line) + default: + err = s.out.Notice(line) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err) + } + + return data, err +} diff --git a/vendor/github.com/xdg/scram/LICENSE b/vendor/github.com/xdg/scram/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/vendor/github.com/xdg/scram/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/xdg/scram/client.go b/vendor/github.com/xdg/scram/client.go new file mode 100644 index 0000000..ca0c4c7 --- /dev/null +++ b/vendor/github.com/xdg/scram/client.go @@ -0,0 +1,130 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "sync" + + "golang.org/x/crypto/pbkdf2" +) + +// Client implements the client side of SCRAM authentication. It holds +// configuration values needed to initialize new client-side conversations for +// a specific username, password and authorization ID tuple. Client caches +// the computationally-expensive parts of a SCRAM conversation as described in +// RFC-5802. If repeated authentication conversations may be required for a +// user (e.g. disconnect/reconnect), the user's Client should be preserved. +// +// For security reasons, Clients have a default minimum PBKDF2 iteration count +// of 4096. If a server requests a smaller iteration count, an authentication +// conversation will error. 
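+//
+// Derived keys are cached per KeyFactors (salt and iteration count), so
+// repeated conversations against the same server parameters skip the
+// PBKDF2 computation.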
+// +// A Client can also be used by a server application to construct the hashed +// authentication values to be stored for a new user. See StoredCredentials() +// for more. +type Client struct { + sync.RWMutex + username string + password string + authzID string + minIters int + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + cache map[KeyFactors]derivedKeys +} + +func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client { + return &Client{ + username: username, + password: password, + authzID: authzID, + minIters: 4096, + nonceGen: defaultNonceGenerator, + hashGen: fcn, + cache: make(map[KeyFactors]derivedKeys), + } +} + +// WithMinIterations changes minimum required PBKDF2 iteration count. +func (c *Client) WithMinIterations(n int) *Client { + c.Lock() + defer c.Unlock() + c.minIters = n + return c +} + +// WithNonceGenerator replaces the default nonce generator (base64 encoding of +// 24 bytes from crypto/rand) with a custom generator. This is provided for +// testing or for users with custom nonce requirements. +func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client { + c.Lock() + defer c.Unlock() + c.nonceGen = ng + return c +} + +// NewConversation constructs a client-side authentication conversation. +// Conversations cannot be reused, so this must be called for each new +// authentication attempt. +func (c *Client) NewConversation() *ClientConversation { + c.RLock() + defer c.RUnlock() + return &ClientConversation{ + client: c, + nonceGen: c.nonceGen, + hashGen: c.hashGen, + minIters: c.minIters, + } +} + +func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys { + dk, ok := c.getCache(kf) + if !ok { + dk = c.computeKeys(kf) + c.setCache(kf, dk) + } + return dk +} + +// GetStoredCredentials takes a salt and iteration count structure and +// provides the values that must be stored by a server to authentication a +// user. These values are what the Server credential lookup function must +// return for a given username. +func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials { + dk := c.getDerivedKeys(kf) + return StoredCredentials{ + KeyFactors: kf, + StoredKey: dk.StoredKey, + ServerKey: dk.ServerKey, + } +} + +func (c *Client) computeKeys(kf KeyFactors) derivedKeys { + h := c.hashGen() + saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen) + clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key")) + + return derivedKeys{ + ClientKey: clientKey, + StoredKey: computeHash(c.hashGen, clientKey), + ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")), + } +} + +func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) { + c.RLock() + defer c.RUnlock() + dk, ok := c.cache[kf] + return dk, ok +} + +func (c *Client) setCache(kf KeyFactors, dk derivedKeys) { + c.Lock() + defer c.Unlock() + c.cache[kf] = dk + return +} diff --git a/vendor/github.com/xdg/scram/client_conv.go b/vendor/github.com/xdg/scram/client_conv.go new file mode 100644 index 0000000..8340568 --- /dev/null +++ b/vendor/github.com/xdg/scram/client_conv.go @@ -0,0 +1,149 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" + "strings" +) + +type clientState int + +const ( + clientStarting clientState = iota + clientFirst + clientFinal + clientDone +) + +// ClientConversation implements the client-side of an authentication +// conversation with a server. A new conversation must be created for +// each authentication attempt. +type ClientConversation struct { + client *Client + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + minIters int + state clientState + valid bool + gs2 string + nonce string + c1b string + serveSig []byte +} + +// Step takes a string provided from a server (or just an empty string for the +// very first conversation step) and attempts to move the authentication +// conversation forward. It returns a string to be sent to the server or an +// error if the server message is invalid. Calling Step after a conversation +// completes is also an error. +func (cc *ClientConversation) Step(challenge string) (response string, err error) { + switch cc.state { + case clientStarting: + cc.state = clientFirst + response, err = cc.firstMsg() + case clientFirst: + cc.state = clientFinal + response, err = cc.finalMsg(challenge) + case clientFinal: + cc.state = clientDone + response, err = cc.validateServer(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (cc *ClientConversation) Done() bool { + return cc.state == clientDone +} + +// Valid returns true if the conversation successfully authenticated with the +// server, including counter-validation that the server actually has the +// user's stored credentials. 
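+// Valid is only meaningful once Done reports true.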
+func (cc *ClientConversation) Valid() bool { + return cc.valid +} + +func (cc *ClientConversation) firstMsg() (string, error) { + // Values are cached for use in final message parameters + cc.gs2 = cc.gs2Header() + cc.nonce = cc.client.nonceGen() + cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce) + + return cc.gs2 + cc.c1b, nil +} + +func (cc *ClientConversation) finalMsg(s1 string) (string, error) { + msg, err := parseServerFirst(s1) + if err != nil { + return "", err + } + + // Check nonce prefix and update + if !strings.HasPrefix(msg.nonce, cc.nonce) { + return "", errors.New("server nonce did not extend client nonce") + } + cc.nonce = msg.nonce + + // Check iteration count vs minimum + if msg.iters < cc.minIters { + return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters) + } + + // Create client-final-message-without-proof + c2wop := fmt.Sprintf( + "c=%s,r=%s", + base64.StdEncoding.EncodeToString([]byte(cc.gs2)), + cc.nonce, + ) + + // Create auth message + authMsg := cc.c1b + "," + s1 + "," + c2wop + + // Get derived keys from client cache + dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters}) + + // Create proof as clientkey XOR clientsignature + clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg)) + clientProof := xorBytes(dk.ClientKey, clientSignature) + proof := base64.StdEncoding.EncodeToString(clientProof) + + // Cache ServerSignature for later validation + cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg)) + + return fmt.Sprintf("%s,p=%s", c2wop, proof), nil +} + +func (cc *ClientConversation) validateServer(s2 string) (string, error) { + msg, err := parseServerFinal(s2) + if err != nil { + return "", err + } + + if len(msg.err) > 0 { + return "", fmt.Errorf("server error: %s", msg.err) + } + + if !hmac.Equal(msg.verifier, cc.serveSig) { + return "", errors.New("server validation failed") + } + + cc.valid = true + return "", nil +} + +func (cc *ClientConversation) gs2Header() string { + if cc.client.authzID == "" { + return "n,," + } + return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID)) +} diff --git a/vendor/github.com/xdg/scram/common.go b/vendor/github.com/xdg/scram/common.go new file mode 100644 index 0000000..cb705cb --- /dev/null +++ b/vendor/github.com/xdg/scram/common.go @@ -0,0 +1,97 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "strings" +) + +// NonceGeneratorFcn defines a function that returns a string of high-quality +// random printable ASCII characters EXCLUDING the comma (',') character. The +// default nonce generator provides Base64 encoding of 24 bytes from +// crypto/rand. +type NonceGeneratorFcn func() string + +// derivedKeys collects the three cryptographically derived values +// into one struct for caching. +type derivedKeys struct { + ClientKey []byte + StoredKey []byte + ServerKey []byte +} + +// KeyFactors represent the two server-provided factors needed to compute +// client credentials for authentication. Salt is decoded bytes (i.e. not +// base64), but in string form so that KeyFactors can be used as a map key for +// cached credentials. 
+type KeyFactors struct { + Salt string + Iters int +} + +// StoredCredentials are the values that a server must store for a given +// username to allow authentication. They include the salt and iteration +// count, plus the derived values to authenticate a client and for the server +// to authenticate itself back to the client. +// +// NOTE: these are specific to a given hash function. To allow a user to +// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of +// StoredCredentials must be created and stored, one for each hash function. +type StoredCredentials struct { + KeyFactors + StoredKey []byte + ServerKey []byte +} + +// CredentialLookup is a callback to provide StoredCredentials for a given +// username. This is used to configure Server objects. +// +// NOTE: these are specific to a given hash function. The callback provided +// to a Server with a given hash function must provide the corresponding +// StoredCredentials. +type CredentialLookup func(string) (StoredCredentials, error) + +func defaultNonceGenerator() string { + raw := make([]byte, 24) + nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + rand.Read(raw) + base64.StdEncoding.Encode(nonce, raw) + return string(nonce) +} + +func encodeName(s string) string { + return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1) +} + +func decodeName(s string) (string, error) { + // TODO Check for = not followed by 2C or 3D + return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil +} + +func computeHash(hg HashGeneratorFcn, b []byte) []byte { + h := hg() + h.Write(b) + return h.Sum(nil) +} + +func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte { + mac := hmac.New(hg, key) + mac.Write(data) + return mac.Sum(nil) +} + +func xorBytes(a, b []byte) []byte { + // TODO check a & b are same length, or just xor to smallest + xor := make([]byte, len(a)) + for i := range a { + xor[i] = a[i] ^ b[i] + } + return xor +} diff --git a/vendor/github.com/xdg/scram/doc.go b/vendor/github.com/xdg/scram/doc.go new file mode 100644 index 0000000..d43bee6 --- /dev/null +++ b/vendor/github.com/xdg/scram/doc.go @@ -0,0 +1,24 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package scram provides client and server implementations of the Salted +// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802 +// and RFC-7677. +// +// Usage +// +// The scram package provides two variables, `SHA1` and `SHA256`, that are +// used to construct Client or Server objects. +// +// clientSHA1, err := scram.SHA1.NewClient(username, password, authID) +// clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// +// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) +// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// +// These objects are used to construct ClientConversation or +// ServerConversation objects that are used to carry out authentication. +package scram diff --git a/vendor/github.com/xdg/scram/parse.go b/vendor/github.com/xdg/scram/parse.go new file mode 100644 index 0000000..722f604 --- /dev/null +++ b/vendor/github.com/xdg/scram/parse.go @@ -0,0 +1,205 @@ +// Copyright 2018 by David A. Golden. All rights reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type c1Msg struct {
+	gs2Header string
+	authzID   string
+	username  string
+	nonce     string
+	c1b       string
+}
+
+type c2Msg struct {
+	cbind []byte
+	nonce string
+	proof []byte
+	c2wop string
+}
+
+type s1Msg struct {
+	nonce string
+	salt  []byte
+	iters int
+}
+
+type s2Msg struct {
+	verifier []byte
+	err      string
+}
+
+func parseField(s, k string) (string, error) {
+	t := strings.TrimPrefix(s, k+"=")
+	if t == s {
+		return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k)
+	}
+	return t, nil
+}
+
+func parseGS2Flag(s string) (string, error) {
+	if s[0] == 'p' {
+		return "", fmt.Errorf("channel binding requested but not supported")
+	}
+
+	if s == "n" || s == "y" {
+		return s, nil
+	}
+
+	return "", fmt.Errorf("error parsing '%s' for gs2 flag", s)
+}
+
+func parseFieldBase64(s, k string) ([]byte, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return nil, err
+	}
+
+	dec, err := base64.StdEncoding.DecodeString(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dec, nil
+}
+
+func parseFieldInt(s, k string) (int, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return 0, err
+	}
+
+	num, err := strconv.Atoi(raw)
+	if err != nil {
+		return 0, fmt.Errorf("error parsing field '%s': %v", k, err)
+	}
+
+	return num, nil
+}
+
+func parseClientFirst(c1 string) (msg c1Msg, err error) {
+
+	fields := strings.Split(c1, ",")
+	if len(fields) < 4 {
+		err = errors.New("not enough fields in first client message")
+		return
+	}
+
+	gs2flag, err := parseGS2Flag(fields[0])
+	if err != nil {
+		return
+	}
+
+	// 'a' field is optional
+	if len(fields[1]) > 0 {
+		msg.authzID, err = parseField(fields[1], "a")
+		if err != nil {
+			return
+		}
+	}
+
+	// Recombine and save the gs2 header
+	msg.gs2Header = gs2flag + "," + msg.authzID + ","
+
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(fields[2], "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	msg.username, err = parseField(fields[2], "n")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[3], "r")
+	if err != nil {
+		return
+	}
+
+	msg.c1b = strings.Join(fields[2:], ",")
+
+	return
+}
+
+func parseClientFinal(c2 string) (msg c2Msg, err error) {
+	fields := strings.Split(c2, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in final client message")
+		return
+	}
+
+	msg.cbind, err = parseFieldBase64(fields[0], "c")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[1], "r")
+	if err != nil {
+		return
+	}
+
+	// Extension fields may come between nonce and proof, so we
+	// grab the *last* field as proof.
+	msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p")
+	if err != nil {
+		return
+	}
+
+	msg.c2wop = c2[:strings.LastIndex(c2, ",")]
+
+	return
+}
+
+func parseServerFirst(s1 string) (msg s1Msg, err error) {
+
+	// Check for unsupported extensions field "m".
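+	// For reference, a well-formed server-first-message (RFC 5802) has the
+	// shape (iteration count illustrative):
+	//
+	//	r=<client-nonce><server-nonce>,s=<base64-salt>,i=4096
+	//
+	// and an extensions attribute, if present, appears as a leading
+	// "m=..." field, which is why the prefix check below suffices.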
+ if strings.HasPrefix(s1, "m=") { + err = errors.New("SCRAM message extensions are not supported") + return + } + + fields := strings.Split(s1, ",") + if len(fields) < 3 { + err = errors.New("not enough fields in first server message") + return + } + + msg.nonce, err = parseField(fields[0], "r") + if err != nil { + return + } + + msg.salt, err = parseFieldBase64(fields[1], "s") + if err != nil { + return + } + + msg.iters, err = parseFieldInt(fields[2], "i") + + return +} + +func parseServerFinal(s2 string) (msg s2Msg, err error) { + fields := strings.Split(s2, ",") + + msg.verifier, err = parseFieldBase64(fields[0], "v") + if err == nil { + return + } + + msg.err, err = parseField(fields[0], "e") + + return +} diff --git a/vendor/github.com/xdg/scram/scram.go b/vendor/github.com/xdg/scram/scram.go new file mode 100644 index 0000000..9e9836a --- /dev/null +++ b/vendor/github.com/xdg/scram/scram.go @@ -0,0 +1,66 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/sha1" + "crypto/sha256" + "fmt" + "hash" + + "github.com/xdg/stringprep" +) + +// HashGeneratorFcn abstracts a factory function that returns a hash.Hash +// value to be used for SCRAM operations. Generally, one would use the +// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most +// common forms of SCRAM. +type HashGeneratorFcn func() hash.Hash + +// SHA1 is a function that returns a crypto/sha1 hasher and should be used to +// create Client objects configured for SHA-1 hashing. +var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } + +// SHA256 is a function that returns a crypto/sha256 hasher and should be used +// to create Client objects configured for SHA-256 hashing. +var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } + +// NewClient constructs a SCRAM client component based on a given hash.Hash +// factory receiver. This constructor will normalize the username, password +// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If +// SASLprep fails, the method returns an error. +func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) { + var userprep, passprep, authprep string + var err error + + if userprep, err = stringprep.SASLprep.Prepare(username); err != nil { + return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err) + } + if passprep, err = stringprep.SASLprep.Prepare(password); err != nil { + return nil, fmt.Errorf("Error SASLprepping password '%s': %v", password, err) + } + if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil { + return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err) + } + + return newClient(userprep, passprep, authprep, f), nil +} + +// NewClientUnprepped acts like NewClient, except none of the arguments will +// be normalized via SASLprep. This is not generally recommended, but is +// provided for users that may have custom normalization needs. +func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) { + return newClient(username, password, authzID, f), nil +} + +// NewServer constructs a SCRAM server component based on a given hash.Hash +// factory receiver. 
To be maximally generic, it uses dependency injection to +// handle credential lookup, which is the process of turning a username string +// into a struct with stored credentials for authentication. +func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) { + return newServer(cl, f) +} diff --git a/vendor/github.com/xdg/scram/server.go b/vendor/github.com/xdg/scram/server.go new file mode 100644 index 0000000..b119b36 --- /dev/null +++ b/vendor/github.com/xdg/scram/server.go @@ -0,0 +1,50 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import "sync" + +// Server implements the server side of SCRAM authentication. It holds +// configuration values needed to initialize new server-side conversations. +// Generally, this can be persistent within an application. +type Server struct { + sync.RWMutex + credentialCB CredentialLookup + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn +} + +func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) { + return &Server{ + credentialCB: cl, + nonceGen: defaultNonceGenerator, + hashGen: fcn, + }, nil +} + +// WithNonceGenerator replaces the default nonce generator (base64 encoding of +// 24 bytes from crypto/rand) with a custom generator. This is provided for +// testing or for users with custom nonce requirements. +func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server { + s.Lock() + defer s.Unlock() + s.nonceGen = ng + return s +} + +// NewConversation constructs a server-side authentication conversation. +// Conversations cannot be reused, so this must be called for each new +// authentication attempt. +func (s *Server) NewConversation() *ServerConversation { + s.RLock() + defer s.RUnlock() + return &ServerConversation{ + nonceGen: s.nonceGen, + hashGen: s.hashGen, + credentialCB: s.credentialCB, + } +} diff --git a/vendor/github.com/xdg/scram/server_conv.go b/vendor/github.com/xdg/scram/server_conv.go new file mode 100644 index 0000000..9c8838c --- /dev/null +++ b/vendor/github.com/xdg/scram/server_conv.go @@ -0,0 +1,151 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" +) + +type serverState int + +const ( + serverFirst serverState = iota + serverFinal + serverDone +) + +// ServerConversation implements the server-side of an authentication +// conversation with a client. A new conversation must be created for +// each authentication attempt. +type ServerConversation struct { + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + credentialCB CredentialLookup + state serverState + credential StoredCredentials + valid bool + gs2Header string + username string + authzID string + nonce string + c1b string + s1 string +} + +// Step takes a string provided from a client and attempts to move the +// authentication conversation forward. It returns a string to be sent to the +// client or an error if the client message is invalid. Calling Step after a +// conversation completes is also an error. 
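+//
+// A minimal serving sketch (readFromClient and sendToClient are
+// hypothetical transport helpers, and server is assumed to come from
+// scram.SHA256.NewServer):
+//
+//	conv := server.NewConversation()
+//	for !conv.Done() {
+//		reply, err := conv.Step(readFromClient())
+//		if err != nil {
+//			sendToClient(reply) // may carry an "e=..." SCRAM error attribute
+//			return err
+//		}
+//		sendToClient(reply)
+//	}
+//	if !conv.Valid() {
+//		// authentication failed
+//	}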
+func (sc *ServerConversation) Step(challenge string) (response string, err error) { + switch sc.state { + case serverFirst: + sc.state = serverFinal + response, err = sc.firstMsg(challenge) + case serverFinal: + sc.state = serverDone + response, err = sc.finalMsg(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (sc *ServerConversation) Done() bool { + return sc.state == serverDone +} + +// Valid returns true if the conversation successfully authenticated the +// client. +func (sc *ServerConversation) Valid() bool { + return sc.valid +} + +// Username returns the client-provided username. This is valid to call +// if the first conversation Step() is successful. +func (sc *ServerConversation) Username() string { + return sc.username +} + +// AuthzID returns the (optional) client-provided authorization identity, if +// any. If one was not provided, it returns the empty string. This is valid +// to call if the first conversation Step() is successful. +func (sc *ServerConversation) AuthzID() string { + return sc.authzID +} + +func (sc *ServerConversation) firstMsg(c1 string) (string, error) { + msg, err := parseClientFirst(c1) + if err != nil { + sc.state = serverDone + return "", err + } + + sc.gs2Header = msg.gs2Header + sc.username = msg.username + sc.authzID = msg.authzID + + sc.credential, err = sc.credentialCB(msg.username) + if err != nil { + sc.state = serverDone + return "e=unknown-user", err + } + + sc.nonce = msg.nonce + sc.nonceGen() + sc.c1b = msg.c1b + sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d", + sc.nonce, + base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)), + sc.credential.Iters, + ) + + return sc.s1, nil +} + +// For errors, returns server error message as well as non-nil error. Callers +// can choose whether to send server error or not. +func (sc *ServerConversation) finalMsg(c2 string) (string, error) { + msg, err := parseClientFinal(c2) + if err != nil { + return "", err + } + + // Check channel binding matches what we expect; in this case, we expect + // just the gs2 header we received as we don't support channel binding + // with a data payload. If we add binding, we need to independently + // compute the header to match here. 
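+	// For example, with no channel binding and no authzID the gs2 header
+	// is "n,,", so a well-formed client sends c=biws (base64 of "n,,").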
+ if string(msg.cbind) != sc.gs2Header { + return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header) + } + + // Check nonce received matches what we sent + if msg.nonce != sc.nonce { + return "e=other-error", errors.New("nonce received did not match nonce sent") + } + + // Create auth message + authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop + + // Retrieve ClientKey from proof and verify it + clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg)) + clientKey := xorBytes([]byte(msg.proof), clientSignature) + storedKey := computeHash(sc.hashGen, clientKey) + + // Compare with constant-time function + if !hmac.Equal(storedKey, sc.credential.StoredKey) { + return "e=invalid-proof", errors.New("challenge proof invalid") + } + + sc.valid = true + + // Compute and return server verifier + serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg)) + return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil +} diff --git a/vendor/github.com/xdg/stringprep/LICENSE b/vendor/github.com/xdg/stringprep/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/xdg/stringprep/bidi.go b/vendor/github.com/xdg/stringprep/bidi.go new file mode 100644 index 0000000..6f6d321 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/bidi.go @@ -0,0 +1,73 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var errHasLCat = "BiDi string can't have runes from category L" +var errFirstRune = "BiDi string first rune must have category R or AL" +var errLastRune = "BiDi string last rune must have category R or AL" + +// Check for prohibited characters from table C.8 +func checkBiDiProhibitedRune(s string) error { + for _, r := range s { + if TableC8.Contains(r) { + return Error{Msg: errProhibited, Rune: r} + } + } + return nil +} + +// Check for LCat characters from table D.2 +func checkBiDiLCat(s string) error { + for _, r := range s { + if TableD2.Contains(r) { + return Error{Msg: errHasLCat, Rune: r} + } + } + return nil +} + +// Check first and last characters are in table D.1; requires non-empty string +func checkBadFirstAndLastRandALCat(s string) error { + rs := []rune(s) + if !TableD1.Contains(rs[0]) { + return Error{Msg: errFirstRune, Rune: rs[0]} + } + n := len(rs) - 1 + if !TableD1.Contains(rs[n]) { + return Error{Msg: errLastRune, Rune: rs[n]} + } + return nil +} + +// Look for RandALCat characters from table D.1 +func hasBiDiRandALCat(s string) bool { + for _, r := range s { + if TableD1.Contains(r) { + return true + } + } + return false +} + +// Check that BiDi rules are satisfied ; let empty string pass this rule +func passesBiDiRules(s string) error { + if len(s) == 0 { + return nil + } + if err := checkBiDiProhibitedRune(s); err != nil { + return err + } + if hasBiDiRandALCat(s) { + if err := checkBiDiLCat(s); err != nil { + return err + } + if err := checkBadFirstAndLastRandALCat(s); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/xdg/stringprep/doc.go b/vendor/github.com/xdg/stringprep/doc.go new file mode 100644 index 0000000..b319e08 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/doc.go @@ -0,0 +1,10 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package stringprep provides data tables and algorithms for RFC-3454, +// including errata (as of 2018-02). It also provides a profile for +// SASLprep as defined in RFC-4013. +package stringprep diff --git a/vendor/github.com/xdg/stringprep/error.go b/vendor/github.com/xdg/stringprep/error.go new file mode 100644 index 0000000..7403e49 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/error.go @@ -0,0 +1,14 @@ +package stringprep + +import "fmt" + +// Error describes problems encountered during stringprep, including what rune +// was problematic. +type Error struct { + Msg string + Rune rune +} + +func (e Error) Error() string { + return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune) +} diff --git a/vendor/github.com/xdg/stringprep/map.go b/vendor/github.com/xdg/stringprep/map.go new file mode 100644 index 0000000..e56a0dd --- /dev/null +++ b/vendor/github.com/xdg/stringprep/map.go @@ -0,0 +1,21 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +// Mapping represents a stringprep mapping, from a single rune to zero or more +// runes. 
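+//
+// For example (entries illustrative; U+00DF 'ß' folds to "ss" per
+// RFC 3454 Table B.2):
+//
+//	m := Mapping{0x00DF: []rune{0x0073, 0x0073}}
+//	rs, ok := m.Map(0x00DF) // rs == []rune("ss"), ok == true
+//	rs, ok = m.Map('a')     // rs == nil, ok == false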
+type Mapping map[rune][]rune + +// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping. +// The ok return value is false if the rune was not found. +func (m Mapping) Map(r rune) (replacement []rune, ok bool) { + rs, ok := m[r] + if !ok { + return nil, false + } + return rs, true +} diff --git a/vendor/github.com/xdg/stringprep/profile.go b/vendor/github.com/xdg/stringprep/profile.go new file mode 100644 index 0000000..5a73be9 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/profile.go @@ -0,0 +1,75 @@ +package stringprep + +import ( + "golang.org/x/text/unicode/norm" +) + +// Profile represents a stringprep profile. +type Profile struct { + Mappings []Mapping + Normalize bool + Prohibits []Set + CheckBiDi bool +} + +var errProhibited = "prohibited character" + +// Prepare transforms an input string to an output string following +// the rules defined in the profile as defined by RFC-3454. +func (p Profile) Prepare(s string) (string, error) { + // Optimistically, assume output will be same length as input + temp := make([]rune, 0, len(s)) + + // Apply maps + for _, r := range s { + rs, ok := p.applyMaps(r) + if ok { + temp = append(temp, rs...) + } else { + temp = append(temp, r) + } + } + + // Normalize + var out string + if p.Normalize { + out = norm.NFKC.String(string(temp)) + } else { + out = string(temp) + } + + // Check prohibited + for _, r := range out { + if p.runeIsProhibited(r) { + return "", Error{Msg: errProhibited, Rune: r} + } + } + + // Check BiDi allowed + if p.CheckBiDi { + if err := passesBiDiRules(out); err != nil { + return "", err + } + } + + return out, nil +} + +func (p Profile) applyMaps(r rune) ([]rune, bool) { + for _, m := range p.Mappings { + rs, ok := m.Map(r) + if ok { + return rs, true + } + } + return nil, false +} + +func (p Profile) runeIsProhibited(r rune) bool { + for _, s := range p.Prohibits { + if s.Contains(r) { + return true + } + } + return false +} diff --git a/vendor/github.com/xdg/stringprep/saslprep.go b/vendor/github.com/xdg/stringprep/saslprep.go new file mode 100644 index 0000000..4001348 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/saslprep.go @@ -0,0 +1,52 @@ +package stringprep + +var mapNonASCIISpaceToASCIISpace = Mapping{ + 0x00A0: []rune{0x0020}, + 0x1680: []rune{0x0020}, + 0x2000: []rune{0x0020}, + 0x2001: []rune{0x0020}, + 0x2002: []rune{0x0020}, + 0x2003: []rune{0x0020}, + 0x2004: []rune{0x0020}, + 0x2005: []rune{0x0020}, + 0x2006: []rune{0x0020}, + 0x2007: []rune{0x0020}, + 0x2008: []rune{0x0020}, + 0x2009: []rune{0x0020}, + 0x200A: []rune{0x0020}, + 0x200B: []rune{0x0020}, + 0x202F: []rune{0x0020}, + 0x205F: []rune{0x0020}, + 0x3000: []rune{0x0020}, +} + +// SASLprep is a pre-defined stringprep profile for user names and passwords +// as described in RFC-4013. +// +// Because the stringprep distinction between query and stored strings was +// intended for compatibility across profile versions, but SASLprep was never +// updated and is now deprecated, this profile only operates in stored +// strings mode, prohibiting unassigned code points. 
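+//
+// Example results from RFC 4013, section 3:
+//
+//	SASLprep.Prepare("I\u00ADX") // "IX", nil  (soft hyphen mapped to nothing)
+//	SASLprep.Prepare("\u2168")   // "IX", nil  (NFKC maps Roman numeral nine)
+//	SASLprep.Prepare("\u0007")   // error: prohibited control character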
+var SASLprep Profile = saslprep + +var saslprep = Profile{ + Mappings: []Mapping{ + TableB1, + mapNonASCIISpaceToASCIISpace, + }, + Normalize: true, + Prohibits: []Set{ + TableA1, + TableC1_2, + TableC2_1, + TableC2_2, + TableC3, + TableC4, + TableC5, + TableC6, + TableC7, + TableC8, + TableC9, + }, + CheckBiDi: true, +} diff --git a/vendor/github.com/xdg/stringprep/set.go b/vendor/github.com/xdg/stringprep/set.go new file mode 100644 index 0000000..c837e28 --- /dev/null +++ b/vendor/github.com/xdg/stringprep/set.go @@ -0,0 +1,36 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +import "sort" + +// RuneRange represents a close-ended range of runes: [N,M]. For a range +// consisting of a single rune, N and M will be equal. +type RuneRange [2]rune + +// Contains returns true if a rune is within the bounds of the RuneRange. +func (rr RuneRange) Contains(r rune) bool { + return rr[0] <= r && r <= rr[1] +} + +func (rr RuneRange) isAbove(r rune) bool { + return r <= rr[0] +} + +// Set represents a stringprep data table used to identify runes of a +// particular type. +type Set []RuneRange + +// Contains returns true if a rune is within any of the RuneRanges in the +// Set. +func (s Set) Contains(r rune) bool { + i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) }) + if i < len(s) && s[i].Contains(r) { + return true + } + return false +} diff --git a/vendor/github.com/xdg/stringprep/tables.go b/vendor/github.com/xdg/stringprep/tables.go new file mode 100644 index 0000000..c3fc1fa --- /dev/null +++ b/vendor/github.com/xdg/stringprep/tables.go @@ -0,0 +1,3215 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var tableA1 = Set{ + RuneRange{0x0221, 0x0221}, + RuneRange{0x0234, 0x024F}, + RuneRange{0x02AE, 0x02AF}, + RuneRange{0x02EF, 0x02FF}, + RuneRange{0x0350, 0x035F}, + RuneRange{0x0370, 0x0373}, + RuneRange{0x0376, 0x0379}, + RuneRange{0x037B, 0x037D}, + RuneRange{0x037F, 0x0383}, + RuneRange{0x038B, 0x038B}, + RuneRange{0x038D, 0x038D}, + RuneRange{0x03A2, 0x03A2}, + RuneRange{0x03CF, 0x03CF}, + RuneRange{0x03F7, 0x03FF}, + RuneRange{0x0487, 0x0487}, + RuneRange{0x04CF, 0x04CF}, + RuneRange{0x04F6, 0x04F7}, + RuneRange{0x04FA, 0x04FF}, + RuneRange{0x0510, 0x0530}, + RuneRange{0x0557, 0x0558}, + RuneRange{0x0560, 0x0560}, + RuneRange{0x0588, 0x0588}, + RuneRange{0x058B, 0x0590}, + RuneRange{0x05A2, 0x05A2}, + RuneRange{0x05BA, 0x05BA}, + RuneRange{0x05C5, 0x05CF}, + RuneRange{0x05EB, 0x05EF}, + RuneRange{0x05F5, 0x060B}, + RuneRange{0x060D, 0x061A}, + RuneRange{0x061C, 0x061E}, + RuneRange{0x0620, 0x0620}, + RuneRange{0x063B, 0x063F}, + RuneRange{0x0656, 0x065F}, + RuneRange{0x06EE, 0x06EF}, + RuneRange{0x06FF, 0x06FF}, + RuneRange{0x070E, 0x070E}, + RuneRange{0x072D, 0x072F}, + RuneRange{0x074B, 0x077F}, + RuneRange{0x07B2, 0x0900}, + RuneRange{0x0904, 0x0904}, + RuneRange{0x093A, 0x093B}, + RuneRange{0x094E, 0x094F}, + RuneRange{0x0955, 0x0957}, + RuneRange{0x0971, 0x0980}, + RuneRange{0x0984, 0x0984}, + RuneRange{0x098D, 0x098E}, + RuneRange{0x0991, 0x0992}, + RuneRange{0x09A9, 0x09A9}, + RuneRange{0x09B1, 0x09B1}, + RuneRange{0x09B3, 0x09B5}, + RuneRange{0x09BA, 0x09BB}, + RuneRange{0x09BD, 0x09BD}, + RuneRange{0x09C5, 0x09C6}, + RuneRange{0x09C9, 0x09CA}, + RuneRange{0x09CE, 0x09D6}, + RuneRange{0x09D8, 0x09DB}, + RuneRange{0x09DE, 0x09DE}, + RuneRange{0x09E4, 0x09E5}, + RuneRange{0x09FB, 0x0A01}, + RuneRange{0x0A03, 0x0A04}, + RuneRange{0x0A0B, 0x0A0E}, + RuneRange{0x0A11, 0x0A12}, + RuneRange{0x0A29, 0x0A29}, + RuneRange{0x0A31, 0x0A31}, + RuneRange{0x0A34, 0x0A34}, + RuneRange{0x0A37, 0x0A37}, + RuneRange{0x0A3A, 0x0A3B}, + RuneRange{0x0A3D, 0x0A3D}, + RuneRange{0x0A43, 0x0A46}, + RuneRange{0x0A49, 0x0A4A}, + RuneRange{0x0A4E, 0x0A58}, + RuneRange{0x0A5D, 0x0A5D}, + RuneRange{0x0A5F, 0x0A65}, + RuneRange{0x0A75, 0x0A80}, + RuneRange{0x0A84, 0x0A84}, + RuneRange{0x0A8C, 0x0A8C}, + RuneRange{0x0A8E, 0x0A8E}, + RuneRange{0x0A92, 0x0A92}, + RuneRange{0x0AA9, 0x0AA9}, + RuneRange{0x0AB1, 0x0AB1}, + RuneRange{0x0AB4, 0x0AB4}, + RuneRange{0x0ABA, 0x0ABB}, + RuneRange{0x0AC6, 0x0AC6}, + RuneRange{0x0ACA, 0x0ACA}, + RuneRange{0x0ACE, 0x0ACF}, + RuneRange{0x0AD1, 0x0ADF}, + RuneRange{0x0AE1, 0x0AE5}, + RuneRange{0x0AF0, 0x0B00}, + RuneRange{0x0B04, 0x0B04}, + RuneRange{0x0B0D, 0x0B0E}, + RuneRange{0x0B11, 0x0B12}, + RuneRange{0x0B29, 0x0B29}, + RuneRange{0x0B31, 0x0B31}, + RuneRange{0x0B34, 0x0B35}, + RuneRange{0x0B3A, 0x0B3B}, + RuneRange{0x0B44, 0x0B46}, + RuneRange{0x0B49, 0x0B4A}, + RuneRange{0x0B4E, 0x0B55}, + RuneRange{0x0B58, 0x0B5B}, + RuneRange{0x0B5E, 0x0B5E}, + RuneRange{0x0B62, 0x0B65}, + RuneRange{0x0B71, 0x0B81}, + RuneRange{0x0B84, 0x0B84}, + RuneRange{0x0B8B, 0x0B8D}, + RuneRange{0x0B91, 0x0B91}, + RuneRange{0x0B96, 0x0B98}, + RuneRange{0x0B9B, 0x0B9B}, + RuneRange{0x0B9D, 0x0B9D}, + RuneRange{0x0BA0, 0x0BA2}, + RuneRange{0x0BA5, 0x0BA7}, + RuneRange{0x0BAB, 0x0BAD}, + RuneRange{0x0BB6, 0x0BB6}, + RuneRange{0x0BBA, 0x0BBD}, + RuneRange{0x0BC3, 0x0BC5}, + RuneRange{0x0BC9, 0x0BC9}, + RuneRange{0x0BCE, 0x0BD6}, + RuneRange{0x0BD8, 0x0BE6}, + RuneRange{0x0BF3, 0x0C00}, + 
RuneRange{0x0C04, 0x0C04}, + RuneRange{0x0C0D, 0x0C0D}, + RuneRange{0x0C11, 0x0C11}, + RuneRange{0x0C29, 0x0C29}, + RuneRange{0x0C34, 0x0C34}, + RuneRange{0x0C3A, 0x0C3D}, + RuneRange{0x0C45, 0x0C45}, + RuneRange{0x0C49, 0x0C49}, + RuneRange{0x0C4E, 0x0C54}, + RuneRange{0x0C57, 0x0C5F}, + RuneRange{0x0C62, 0x0C65}, + RuneRange{0x0C70, 0x0C81}, + RuneRange{0x0C84, 0x0C84}, + RuneRange{0x0C8D, 0x0C8D}, + RuneRange{0x0C91, 0x0C91}, + RuneRange{0x0CA9, 0x0CA9}, + RuneRange{0x0CB4, 0x0CB4}, + RuneRange{0x0CBA, 0x0CBD}, + RuneRange{0x0CC5, 0x0CC5}, + RuneRange{0x0CC9, 0x0CC9}, + RuneRange{0x0CCE, 0x0CD4}, + RuneRange{0x0CD7, 0x0CDD}, + RuneRange{0x0CDF, 0x0CDF}, + RuneRange{0x0CE2, 0x0CE5}, + RuneRange{0x0CF0, 0x0D01}, + RuneRange{0x0D04, 0x0D04}, + RuneRange{0x0D0D, 0x0D0D}, + RuneRange{0x0D11, 0x0D11}, + RuneRange{0x0D29, 0x0D29}, + RuneRange{0x0D3A, 0x0D3D}, + RuneRange{0x0D44, 0x0D45}, + RuneRange{0x0D49, 0x0D49}, + RuneRange{0x0D4E, 0x0D56}, + RuneRange{0x0D58, 0x0D5F}, + RuneRange{0x0D62, 0x0D65}, + RuneRange{0x0D70, 0x0D81}, + RuneRange{0x0D84, 0x0D84}, + RuneRange{0x0D97, 0x0D99}, + RuneRange{0x0DB2, 0x0DB2}, + RuneRange{0x0DBC, 0x0DBC}, + RuneRange{0x0DBE, 0x0DBF}, + RuneRange{0x0DC7, 0x0DC9}, + RuneRange{0x0DCB, 0x0DCE}, + RuneRange{0x0DD5, 0x0DD5}, + RuneRange{0x0DD7, 0x0DD7}, + RuneRange{0x0DE0, 0x0DF1}, + RuneRange{0x0DF5, 0x0E00}, + RuneRange{0x0E3B, 0x0E3E}, + RuneRange{0x0E5C, 0x0E80}, + RuneRange{0x0E83, 0x0E83}, + RuneRange{0x0E85, 0x0E86}, + RuneRange{0x0E89, 0x0E89}, + RuneRange{0x0E8B, 0x0E8C}, + RuneRange{0x0E8E, 0x0E93}, + RuneRange{0x0E98, 0x0E98}, + RuneRange{0x0EA0, 0x0EA0}, + RuneRange{0x0EA4, 0x0EA4}, + RuneRange{0x0EA6, 0x0EA6}, + RuneRange{0x0EA8, 0x0EA9}, + RuneRange{0x0EAC, 0x0EAC}, + RuneRange{0x0EBA, 0x0EBA}, + RuneRange{0x0EBE, 0x0EBF}, + RuneRange{0x0EC5, 0x0EC5}, + RuneRange{0x0EC7, 0x0EC7}, + RuneRange{0x0ECE, 0x0ECF}, + RuneRange{0x0EDA, 0x0EDB}, + RuneRange{0x0EDE, 0x0EFF}, + RuneRange{0x0F48, 0x0F48}, + RuneRange{0x0F6B, 0x0F70}, + RuneRange{0x0F8C, 0x0F8F}, + RuneRange{0x0F98, 0x0F98}, + RuneRange{0x0FBD, 0x0FBD}, + RuneRange{0x0FCD, 0x0FCE}, + RuneRange{0x0FD0, 0x0FFF}, + RuneRange{0x1022, 0x1022}, + RuneRange{0x1028, 0x1028}, + RuneRange{0x102B, 0x102B}, + RuneRange{0x1033, 0x1035}, + RuneRange{0x103A, 0x103F}, + RuneRange{0x105A, 0x109F}, + RuneRange{0x10C6, 0x10CF}, + RuneRange{0x10F9, 0x10FA}, + RuneRange{0x10FC, 0x10FF}, + RuneRange{0x115A, 0x115E}, + RuneRange{0x11A3, 0x11A7}, + RuneRange{0x11FA, 0x11FF}, + RuneRange{0x1207, 0x1207}, + RuneRange{0x1247, 0x1247}, + RuneRange{0x1249, 0x1249}, + RuneRange{0x124E, 0x124F}, + RuneRange{0x1257, 0x1257}, + RuneRange{0x1259, 0x1259}, + RuneRange{0x125E, 0x125F}, + RuneRange{0x1287, 0x1287}, + RuneRange{0x1289, 0x1289}, + RuneRange{0x128E, 0x128F}, + RuneRange{0x12AF, 0x12AF}, + RuneRange{0x12B1, 0x12B1}, + RuneRange{0x12B6, 0x12B7}, + RuneRange{0x12BF, 0x12BF}, + RuneRange{0x12C1, 0x12C1}, + RuneRange{0x12C6, 0x12C7}, + RuneRange{0x12CF, 0x12CF}, + RuneRange{0x12D7, 0x12D7}, + RuneRange{0x12EF, 0x12EF}, + RuneRange{0x130F, 0x130F}, + RuneRange{0x1311, 0x1311}, + RuneRange{0x1316, 0x1317}, + RuneRange{0x131F, 0x131F}, + RuneRange{0x1347, 0x1347}, + RuneRange{0x135B, 0x1360}, + RuneRange{0x137D, 0x139F}, + RuneRange{0x13F5, 0x1400}, + RuneRange{0x1677, 0x167F}, + RuneRange{0x169D, 0x169F}, + RuneRange{0x16F1, 0x16FF}, + RuneRange{0x170D, 0x170D}, + RuneRange{0x1715, 0x171F}, + RuneRange{0x1737, 0x173F}, + RuneRange{0x1754, 0x175F}, + RuneRange{0x176D, 0x176D}, + RuneRange{0x1771, 0x1771}, + 
RuneRange{0x1774, 0x177F}, + RuneRange{0x17DD, 0x17DF}, + RuneRange{0x17EA, 0x17FF}, + RuneRange{0x180F, 0x180F}, + RuneRange{0x181A, 0x181F}, + RuneRange{0x1878, 0x187F}, + RuneRange{0x18AA, 0x1DFF}, + RuneRange{0x1E9C, 0x1E9F}, + RuneRange{0x1EFA, 0x1EFF}, + RuneRange{0x1F16, 0x1F17}, + RuneRange{0x1F1E, 0x1F1F}, + RuneRange{0x1F46, 0x1F47}, + RuneRange{0x1F4E, 0x1F4F}, + RuneRange{0x1F58, 0x1F58}, + RuneRange{0x1F5A, 0x1F5A}, + RuneRange{0x1F5C, 0x1F5C}, + RuneRange{0x1F5E, 0x1F5E}, + RuneRange{0x1F7E, 0x1F7F}, + RuneRange{0x1FB5, 0x1FB5}, + RuneRange{0x1FC5, 0x1FC5}, + RuneRange{0x1FD4, 0x1FD5}, + RuneRange{0x1FDC, 0x1FDC}, + RuneRange{0x1FF0, 0x1FF1}, + RuneRange{0x1FF5, 0x1FF5}, + RuneRange{0x1FFF, 0x1FFF}, + RuneRange{0x2053, 0x2056}, + RuneRange{0x2058, 0x205E}, + RuneRange{0x2064, 0x2069}, + RuneRange{0x2072, 0x2073}, + RuneRange{0x208F, 0x209F}, + RuneRange{0x20B2, 0x20CF}, + RuneRange{0x20EB, 0x20FF}, + RuneRange{0x213B, 0x213C}, + RuneRange{0x214C, 0x2152}, + RuneRange{0x2184, 0x218F}, + RuneRange{0x23CF, 0x23FF}, + RuneRange{0x2427, 0x243F}, + RuneRange{0x244B, 0x245F}, + RuneRange{0x24FF, 0x24FF}, + RuneRange{0x2614, 0x2615}, + RuneRange{0x2618, 0x2618}, + RuneRange{0x267E, 0x267F}, + RuneRange{0x268A, 0x2700}, + RuneRange{0x2705, 0x2705}, + RuneRange{0x270A, 0x270B}, + RuneRange{0x2728, 0x2728}, + RuneRange{0x274C, 0x274C}, + RuneRange{0x274E, 0x274E}, + RuneRange{0x2753, 0x2755}, + RuneRange{0x2757, 0x2757}, + RuneRange{0x275F, 0x2760}, + RuneRange{0x2795, 0x2797}, + RuneRange{0x27B0, 0x27B0}, + RuneRange{0x27BF, 0x27CF}, + RuneRange{0x27EC, 0x27EF}, + RuneRange{0x2B00, 0x2E7F}, + RuneRange{0x2E9A, 0x2E9A}, + RuneRange{0x2EF4, 0x2EFF}, + RuneRange{0x2FD6, 0x2FEF}, + RuneRange{0x2FFC, 0x2FFF}, + RuneRange{0x3040, 0x3040}, + RuneRange{0x3097, 0x3098}, + RuneRange{0x3100, 0x3104}, + RuneRange{0x312D, 0x3130}, + RuneRange{0x318F, 0x318F}, + RuneRange{0x31B8, 0x31EF}, + RuneRange{0x321D, 0x321F}, + RuneRange{0x3244, 0x3250}, + RuneRange{0x327C, 0x327E}, + RuneRange{0x32CC, 0x32CF}, + RuneRange{0x32FF, 0x32FF}, + RuneRange{0x3377, 0x337A}, + RuneRange{0x33DE, 0x33DF}, + RuneRange{0x33FF, 0x33FF}, + RuneRange{0x4DB6, 0x4DFF}, + RuneRange{0x9FA6, 0x9FFF}, + RuneRange{0xA48D, 0xA48F}, + RuneRange{0xA4C7, 0xABFF}, + RuneRange{0xD7A4, 0xD7FF}, + RuneRange{0xFA2E, 0xFA2F}, + RuneRange{0xFA6B, 0xFAFF}, + RuneRange{0xFB07, 0xFB12}, + RuneRange{0xFB18, 0xFB1C}, + RuneRange{0xFB37, 0xFB37}, + RuneRange{0xFB3D, 0xFB3D}, + RuneRange{0xFB3F, 0xFB3F}, + RuneRange{0xFB42, 0xFB42}, + RuneRange{0xFB45, 0xFB45}, + RuneRange{0xFBB2, 0xFBD2}, + RuneRange{0xFD40, 0xFD4F}, + RuneRange{0xFD90, 0xFD91}, + RuneRange{0xFDC8, 0xFDCF}, + RuneRange{0xFDFD, 0xFDFF}, + RuneRange{0xFE10, 0xFE1F}, + RuneRange{0xFE24, 0xFE2F}, + RuneRange{0xFE47, 0xFE48}, + RuneRange{0xFE53, 0xFE53}, + RuneRange{0xFE67, 0xFE67}, + RuneRange{0xFE6C, 0xFE6F}, + RuneRange{0xFE75, 0xFE75}, + RuneRange{0xFEFD, 0xFEFE}, + RuneRange{0xFF00, 0xFF00}, + RuneRange{0xFFBF, 0xFFC1}, + RuneRange{0xFFC8, 0xFFC9}, + RuneRange{0xFFD0, 0xFFD1}, + RuneRange{0xFFD8, 0xFFD9}, + RuneRange{0xFFDD, 0xFFDF}, + RuneRange{0xFFE7, 0xFFE7}, + RuneRange{0xFFEF, 0xFFF8}, + RuneRange{0x10000, 0x102FF}, + RuneRange{0x1031F, 0x1031F}, + RuneRange{0x10324, 0x1032F}, + RuneRange{0x1034B, 0x103FF}, + RuneRange{0x10426, 0x10427}, + RuneRange{0x1044E, 0x1CFFF}, + RuneRange{0x1D0F6, 0x1D0FF}, + RuneRange{0x1D127, 0x1D129}, + RuneRange{0x1D1DE, 0x1D3FF}, + RuneRange{0x1D455, 0x1D455}, + RuneRange{0x1D49D, 0x1D49D}, + RuneRange{0x1D4A0, 0x1D4A1}, + RuneRange{0x1D4A3, 
0x1D4A4}, + RuneRange{0x1D4A7, 0x1D4A8}, + RuneRange{0x1D4AD, 0x1D4AD}, + RuneRange{0x1D4BA, 0x1D4BA}, + RuneRange{0x1D4BC, 0x1D4BC}, + RuneRange{0x1D4C1, 0x1D4C1}, + RuneRange{0x1D4C4, 0x1D4C4}, + RuneRange{0x1D506, 0x1D506}, + RuneRange{0x1D50B, 0x1D50C}, + RuneRange{0x1D515, 0x1D515}, + RuneRange{0x1D51D, 0x1D51D}, + RuneRange{0x1D53A, 0x1D53A}, + RuneRange{0x1D53F, 0x1D53F}, + RuneRange{0x1D545, 0x1D545}, + RuneRange{0x1D547, 0x1D549}, + RuneRange{0x1D551, 0x1D551}, + RuneRange{0x1D6A4, 0x1D6A7}, + RuneRange{0x1D7CA, 0x1D7CD}, + RuneRange{0x1D800, 0x1FFFD}, + RuneRange{0x2A6D7, 0x2F7FF}, + RuneRange{0x2FA1E, 0x2FFFD}, + RuneRange{0x30000, 0x3FFFD}, + RuneRange{0x40000, 0x4FFFD}, + RuneRange{0x50000, 0x5FFFD}, + RuneRange{0x60000, 0x6FFFD}, + RuneRange{0x70000, 0x7FFFD}, + RuneRange{0x80000, 0x8FFFD}, + RuneRange{0x90000, 0x9FFFD}, + RuneRange{0xA0000, 0xAFFFD}, + RuneRange{0xB0000, 0xBFFFD}, + RuneRange{0xC0000, 0xCFFFD}, + RuneRange{0xD0000, 0xDFFFD}, + RuneRange{0xE0000, 0xE0000}, + RuneRange{0xE0002, 0xE001F}, + RuneRange{0xE0080, 0xEFFFD}, +} + +// TableA1 represents RFC-3454 Table A.1. +var TableA1 Set = tableA1 + +var tableB1 = Mapping{ + 0x00AD: []rune{}, // Map to nothing + 0x034F: []rune{}, // Map to nothing + 0x180B: []rune{}, // Map to nothing + 0x180C: []rune{}, // Map to nothing + 0x180D: []rune{}, // Map to nothing + 0x200B: []rune{}, // Map to nothing + 0x200C: []rune{}, // Map to nothing + 0x200D: []rune{}, // Map to nothing + 0x2060: []rune{}, // Map to nothing + 0xFE00: []rune{}, // Map to nothing + 0xFE01: []rune{}, // Map to nothing + 0xFE02: []rune{}, // Map to nothing + 0xFE03: []rune{}, // Map to nothing + 0xFE04: []rune{}, // Map to nothing + 0xFE05: []rune{}, // Map to nothing + 0xFE06: []rune{}, // Map to nothing + 0xFE07: []rune{}, // Map to nothing + 0xFE08: []rune{}, // Map to nothing + 0xFE09: []rune{}, // Map to nothing + 0xFE0A: []rune{}, // Map to nothing + 0xFE0B: []rune{}, // Map to nothing + 0xFE0C: []rune{}, // Map to nothing + 0xFE0D: []rune{}, // Map to nothing + 0xFE0E: []rune{}, // Map to nothing + 0xFE0F: []rune{}, // Map to nothing + 0xFEFF: []rune{}, // Map to nothing +} + +// TableB1 represents RFC-3454 Table B.1. 
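+//
+// Every entry maps to an empty slice, so Map returns ([]rune{}, true) and
+// Profile.Prepare drops the rune entirely (e.g. the soft hyphen U+00AD).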
+var TableB1 Mapping = tableB1 + +var tableB2 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: 
[]rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, // Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: 
[]rune{0x01F3}, // Case map
+	0x01F4: []rune{0x01F5}, // Case map
  [... remaining generated RFC 3454 Table B.2 entries (case maps and additional foldings), one per line in the same form, elided ...]
+	0x1D7BB: []rune{0x03C3}, // Additional folding
+}
+
+// TableB2 represents RFC-3454 Table B.2.
+var TableB2 Mapping = tableB2
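Each table in this file is a map from one source rune to its replacement runes, so applying a table is a single left-to-right pass over the input. A minimal sketch of that fold, assuming the vendored Mapping type is map[rune][]rune as the table literals suggest; foldCase and the demo table are hypothetical illustrations, not part of the xdg/stringprep API:

package main

import "fmt"

// Mapping mirrors the vendored table type: one source rune maps to
// zero or more replacement runes (an assumption from the literals above).
type Mapping map[rune][]rune

// foldCase applies one stringprep mapping table to s: every mapped rune
// is replaced by its expansion and unmapped runes pass through unchanged.
// Hypothetical helper, for illustration only.
func foldCase(s string, table Mapping) string {
	out := make([]rune, 0, len(s))
	for _, r := range s {
		if repl, ok := table[r]; ok {
			out = append(out, repl...)
			continue
		}
		out = append(out, r)
	}
	return string(out)
}

func main() {
	// Per the 0x0130 case-map entry in these tables, U+0130 (İ) folds
	// to "i" followed by U+0307 COMBINING DOT ABOVE.
	demo := Mapping{0x0130: {0x0069, 0x0307}}
	fmt.Println(foldCase("İstanbul", demo)) // "i̇stanbul"
}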
+
+var tableB3 = Mapping{
+	0x0041: []rune{0x0061}, // Case map
+	0x0042: []rune{0x0062}, // Case map
  [... remaining generated RFC 3454 Table B.3 case-map entries, one per line in the same form, elided ...]
+	0x1E28: []rune{0x1E29}, // Case map
+	0x1E2A:
[]rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, // Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, // Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: []rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: []rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // 
Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, // Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map + 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map 
+ 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: []rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x2126: []rune{0x03C9}, // Case map + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, 
// Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: []rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: []rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: []rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: 
[]rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map
+}
+
+// TableB3 represents RFC-3454 Table B.3.
+var TableB3 Mapping = tableB3
+
+var tableC1_1 = Set{
+ RuneRange{0x0020, 0x0020}, // SPACE
+}
+
+// TableC1_1 represents RFC-3454 Table C.1.1.
+var TableC1_1 Set = tableC1_1
+
+var tableC1_2 = Set{
+ RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE
+ RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK
+ RuneRange{0x2000, 0x2000}, // EN QUAD
+ RuneRange{0x2001, 0x2001}, // EM QUAD
+ RuneRange{0x2002, 0x2002}, // EN SPACE
+ RuneRange{0x2003, 0x2003}, // EM SPACE
+ RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE
+ RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE
+ RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE
+ RuneRange{0x2007, 0x2007}, // FIGURE SPACE
+ RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE
+ RuneRange{0x2009, 0x2009}, // THIN SPACE
+ RuneRange{0x200A, 0x200A}, // HAIR SPACE
+ RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE
+ RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE
+ RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE
+ RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE
+}
+
+// TableC1_2 represents RFC-3454 Table C.1.2.
+var TableC1_2 Set = tableC1_2
+
+var tableC2_1 = Set{
+ RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS]
+ RuneRange{0x007F, 0x007F}, // DELETE
+}
+
+// TableC2_1 represents RFC-3454 Table C.2.1.
+var TableC2_1 Set = tableC2_1
+
+var tableC2_2 = Set{
+ RuneRange{0x0080, 0x009F}, // [CONTROL CHARACTERS]
+ RuneRange{0x06DD, 0x06DD}, // ARABIC END OF AYAH
+ RuneRange{0x070F, 0x070F}, // SYRIAC ABBREVIATION MARK
+ RuneRange{0x180E, 0x180E}, // MONGOLIAN VOWEL SEPARATOR
+ RuneRange{0x200C, 0x200C}, // ZERO WIDTH NON-JOINER
+ RuneRange{0x200D, 0x200D}, // ZERO WIDTH JOINER
+ RuneRange{0x2028, 0x2028}, // LINE SEPARATOR
+ RuneRange{0x2029, 0x2029}, // PARAGRAPH SEPARATOR
+ RuneRange{0x2060, 0x2060}, // WORD JOINER
+ RuneRange{0x2061, 0x2061}, // FUNCTION APPLICATION
+ RuneRange{0x2062, 0x2062}, // INVISIBLE TIMES
+ RuneRange{0x2063, 0x2063}, // INVISIBLE SEPARATOR
+ RuneRange{0x206A, 0x206F}, // [CONTROL CHARACTERS]
+ RuneRange{0xFEFF, 0xFEFF}, // ZERO WIDTH NO-BREAK SPACE
+ RuneRange{0xFFF9, 0xFFFC}, // [CONTROL CHARACTERS]
+ RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS]
+}
+
+// TableC2_2 represents RFC-3454 Table C.2.2.
+var TableC2_2 Set = tableC2_2
+
+var tableC3 = Set{
+ RuneRange{0xE000, 0xF8FF}, // [PRIVATE USE, PLANE 0]
+ RuneRange{0xF0000, 0xFFFFD}, // [PRIVATE USE, PLANE 15]
+ RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16]
+}
+
+// TableC3 represents RFC-3454 Table C.3.
+var TableC3 Set = tableC3
+
+var tableC4 = Set{
+ RuneRange{0xFDD0, 0xFDEF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xFFFE, 0xFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x1FFFE, 0x1FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x2FFFE, 0x2FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x3FFFE, 0x3FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x4FFFE, 0x4FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x5FFFE, 0x5FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x6FFFE, 0x6FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x7FFFE, 0x7FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x8FFFE, 0x8FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x9FFFE, 0x9FFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xAFFFE, 0xAFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xBFFFE, 0xBFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xCFFFE, 0xCFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xDFFFE, 0xDFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xEFFFE, 0xEFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0xFFFFE, 0xFFFFF}, // [NONCHARACTER CODE POINTS]
+ RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS]
+}
+
+// TableC4 represents RFC-3454 Table C.4.
+var TableC4 Set = tableC4
+
+var tableC5 = Set{
+ RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES]
+}
+
+// TableC5 represents RFC-3454 Table C.5.
+var TableC5 Set = tableC5
+
+var tableC6 = Set{
+ RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR
+ RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR
+ RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR
+ RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER
+ RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER
+}
+
+// TableC6 represents RFC-3454 Table C.6.
+var TableC6 Set = tableC6
+
+var tableC7 = Set{
+ RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS]
+}
+
+// TableC7 represents RFC-3454 Table C.7.
+var TableC7 Set = tableC7
+
+var tableC8 = Set{
+ RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK
+ RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK
+ RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK
+ RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK
+ RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING
+ RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING
+ RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING
+ RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE
+ RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE
+ RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING
+ RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING
+ RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING
+ RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING
+ RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES
+ RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES
+}
+
+// TableC8 represents RFC-3454 Table C.8.
+var TableC8 Set = tableC8
+
+var tableC9 = Set{
+ RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG
+ RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS]
+}
+
+// TableC9 represents RFC-3454 Table C.9.
+var TableC9 Set = tableC9 + +var tableD1 = Set{ + RuneRange{0x05BE, 0x05BE}, + RuneRange{0x05C0, 0x05C0}, + RuneRange{0x05C3, 0x05C3}, + RuneRange{0x05D0, 0x05EA}, + RuneRange{0x05F0, 0x05F4}, + RuneRange{0x061B, 0x061B}, + RuneRange{0x061F, 0x061F}, + RuneRange{0x0621, 0x063A}, + RuneRange{0x0640, 0x064A}, + RuneRange{0x066D, 0x066F}, + RuneRange{0x0671, 0x06D5}, + RuneRange{0x06DD, 0x06DD}, + RuneRange{0x06E5, 0x06E6}, + RuneRange{0x06FA, 0x06FE}, + RuneRange{0x0700, 0x070D}, + RuneRange{0x0710, 0x0710}, + RuneRange{0x0712, 0x072C}, + RuneRange{0x0780, 0x07A5}, + RuneRange{0x07B1, 0x07B1}, + RuneRange{0x200F, 0x200F}, + RuneRange{0xFB1D, 0xFB1D}, + RuneRange{0xFB1F, 0xFB28}, + RuneRange{0xFB2A, 0xFB36}, + RuneRange{0xFB38, 0xFB3C}, + RuneRange{0xFB3E, 0xFB3E}, + RuneRange{0xFB40, 0xFB41}, + RuneRange{0xFB43, 0xFB44}, + RuneRange{0xFB46, 0xFBB1}, + RuneRange{0xFBD3, 0xFD3D}, + RuneRange{0xFD50, 0xFD8F}, + RuneRange{0xFD92, 0xFDC7}, + RuneRange{0xFDF0, 0xFDFC}, + RuneRange{0xFE70, 0xFE74}, + RuneRange{0xFE76, 0xFEFC}, +} + +// TableD1 represents RFC-3454 Table D.1. +var TableD1 Set = tableD1 + +var tableD2 = Set{ + RuneRange{0x0041, 0x005A}, + RuneRange{0x0061, 0x007A}, + RuneRange{0x00AA, 0x00AA}, + RuneRange{0x00B5, 0x00B5}, + RuneRange{0x00BA, 0x00BA}, + RuneRange{0x00C0, 0x00D6}, + RuneRange{0x00D8, 0x00F6}, + RuneRange{0x00F8, 0x0220}, + RuneRange{0x0222, 0x0233}, + RuneRange{0x0250, 0x02AD}, + RuneRange{0x02B0, 0x02B8}, + RuneRange{0x02BB, 0x02C1}, + RuneRange{0x02D0, 0x02D1}, + RuneRange{0x02E0, 0x02E4}, + RuneRange{0x02EE, 0x02EE}, + RuneRange{0x037A, 0x037A}, + RuneRange{0x0386, 0x0386}, + RuneRange{0x0388, 0x038A}, + RuneRange{0x038C, 0x038C}, + RuneRange{0x038E, 0x03A1}, + RuneRange{0x03A3, 0x03CE}, + RuneRange{0x03D0, 0x03F5}, + RuneRange{0x0400, 0x0482}, + RuneRange{0x048A, 0x04CE}, + RuneRange{0x04D0, 0x04F5}, + RuneRange{0x04F8, 0x04F9}, + RuneRange{0x0500, 0x050F}, + RuneRange{0x0531, 0x0556}, + RuneRange{0x0559, 0x055F}, + RuneRange{0x0561, 0x0587}, + RuneRange{0x0589, 0x0589}, + RuneRange{0x0903, 0x0903}, + RuneRange{0x0905, 0x0939}, + RuneRange{0x093D, 0x0940}, + RuneRange{0x0949, 0x094C}, + RuneRange{0x0950, 0x0950}, + RuneRange{0x0958, 0x0961}, + RuneRange{0x0964, 0x0970}, + RuneRange{0x0982, 0x0983}, + RuneRange{0x0985, 0x098C}, + RuneRange{0x098F, 0x0990}, + RuneRange{0x0993, 0x09A8}, + RuneRange{0x09AA, 0x09B0}, + RuneRange{0x09B2, 0x09B2}, + RuneRange{0x09B6, 0x09B9}, + RuneRange{0x09BE, 0x09C0}, + RuneRange{0x09C7, 0x09C8}, + RuneRange{0x09CB, 0x09CC}, + RuneRange{0x09D7, 0x09D7}, + RuneRange{0x09DC, 0x09DD}, + RuneRange{0x09DF, 0x09E1}, + RuneRange{0x09E6, 0x09F1}, + RuneRange{0x09F4, 0x09FA}, + RuneRange{0x0A05, 0x0A0A}, + RuneRange{0x0A0F, 0x0A10}, + RuneRange{0x0A13, 0x0A28}, + RuneRange{0x0A2A, 0x0A30}, + RuneRange{0x0A32, 0x0A33}, + RuneRange{0x0A35, 0x0A36}, + RuneRange{0x0A38, 0x0A39}, + RuneRange{0x0A3E, 0x0A40}, + RuneRange{0x0A59, 0x0A5C}, + RuneRange{0x0A5E, 0x0A5E}, + RuneRange{0x0A66, 0x0A6F}, + RuneRange{0x0A72, 0x0A74}, + RuneRange{0x0A83, 0x0A83}, + RuneRange{0x0A85, 0x0A8B}, + RuneRange{0x0A8D, 0x0A8D}, + RuneRange{0x0A8F, 0x0A91}, + RuneRange{0x0A93, 0x0AA8}, + RuneRange{0x0AAA, 0x0AB0}, + RuneRange{0x0AB2, 0x0AB3}, + RuneRange{0x0AB5, 0x0AB9}, + RuneRange{0x0ABD, 0x0AC0}, + RuneRange{0x0AC9, 0x0AC9}, + RuneRange{0x0ACB, 0x0ACC}, + RuneRange{0x0AD0, 0x0AD0}, + RuneRange{0x0AE0, 0x0AE0}, + RuneRange{0x0AE6, 0x0AEF}, + RuneRange{0x0B02, 0x0B03}, + RuneRange{0x0B05, 0x0B0C}, + RuneRange{0x0B0F, 0x0B10}, + RuneRange{0x0B13, 0x0B28}, + 
RuneRange{0x0B2A, 0x0B30}, + RuneRange{0x0B32, 0x0B33}, + RuneRange{0x0B36, 0x0B39}, + RuneRange{0x0B3D, 0x0B3E}, + RuneRange{0x0B40, 0x0B40}, + RuneRange{0x0B47, 0x0B48}, + RuneRange{0x0B4B, 0x0B4C}, + RuneRange{0x0B57, 0x0B57}, + RuneRange{0x0B5C, 0x0B5D}, + RuneRange{0x0B5F, 0x0B61}, + RuneRange{0x0B66, 0x0B70}, + RuneRange{0x0B83, 0x0B83}, + RuneRange{0x0B85, 0x0B8A}, + RuneRange{0x0B8E, 0x0B90}, + RuneRange{0x0B92, 0x0B95}, + RuneRange{0x0B99, 0x0B9A}, + RuneRange{0x0B9C, 0x0B9C}, + RuneRange{0x0B9E, 0x0B9F}, + RuneRange{0x0BA3, 0x0BA4}, + RuneRange{0x0BA8, 0x0BAA}, + RuneRange{0x0BAE, 0x0BB5}, + RuneRange{0x0BB7, 0x0BB9}, + RuneRange{0x0BBE, 0x0BBF}, + RuneRange{0x0BC1, 0x0BC2}, + RuneRange{0x0BC6, 0x0BC8}, + RuneRange{0x0BCA, 0x0BCC}, + RuneRange{0x0BD7, 0x0BD7}, + RuneRange{0x0BE7, 0x0BF2}, + RuneRange{0x0C01, 0x0C03}, + RuneRange{0x0C05, 0x0C0C}, + RuneRange{0x0C0E, 0x0C10}, + RuneRange{0x0C12, 0x0C28}, + RuneRange{0x0C2A, 0x0C33}, + RuneRange{0x0C35, 0x0C39}, + RuneRange{0x0C41, 0x0C44}, + RuneRange{0x0C60, 0x0C61}, + RuneRange{0x0C66, 0x0C6F}, + RuneRange{0x0C82, 0x0C83}, + RuneRange{0x0C85, 0x0C8C}, + RuneRange{0x0C8E, 0x0C90}, + RuneRange{0x0C92, 0x0CA8}, + RuneRange{0x0CAA, 0x0CB3}, + RuneRange{0x0CB5, 0x0CB9}, + RuneRange{0x0CBE, 0x0CBE}, + RuneRange{0x0CC0, 0x0CC4}, + RuneRange{0x0CC7, 0x0CC8}, + RuneRange{0x0CCA, 0x0CCB}, + RuneRange{0x0CD5, 0x0CD6}, + RuneRange{0x0CDE, 0x0CDE}, + RuneRange{0x0CE0, 0x0CE1}, + RuneRange{0x0CE6, 0x0CEF}, + RuneRange{0x0D02, 0x0D03}, + RuneRange{0x0D05, 0x0D0C}, + RuneRange{0x0D0E, 0x0D10}, + RuneRange{0x0D12, 0x0D28}, + RuneRange{0x0D2A, 0x0D39}, + RuneRange{0x0D3E, 0x0D40}, + RuneRange{0x0D46, 0x0D48}, + RuneRange{0x0D4A, 0x0D4C}, + RuneRange{0x0D57, 0x0D57}, + RuneRange{0x0D60, 0x0D61}, + RuneRange{0x0D66, 0x0D6F}, + RuneRange{0x0D82, 0x0D83}, + RuneRange{0x0D85, 0x0D96}, + RuneRange{0x0D9A, 0x0DB1}, + RuneRange{0x0DB3, 0x0DBB}, + RuneRange{0x0DBD, 0x0DBD}, + RuneRange{0x0DC0, 0x0DC6}, + RuneRange{0x0DCF, 0x0DD1}, + RuneRange{0x0DD8, 0x0DDF}, + RuneRange{0x0DF2, 0x0DF4}, + RuneRange{0x0E01, 0x0E30}, + RuneRange{0x0E32, 0x0E33}, + RuneRange{0x0E40, 0x0E46}, + RuneRange{0x0E4F, 0x0E5B}, + RuneRange{0x0E81, 0x0E82}, + RuneRange{0x0E84, 0x0E84}, + RuneRange{0x0E87, 0x0E88}, + RuneRange{0x0E8A, 0x0E8A}, + RuneRange{0x0E8D, 0x0E8D}, + RuneRange{0x0E94, 0x0E97}, + RuneRange{0x0E99, 0x0E9F}, + RuneRange{0x0EA1, 0x0EA3}, + RuneRange{0x0EA5, 0x0EA5}, + RuneRange{0x0EA7, 0x0EA7}, + RuneRange{0x0EAA, 0x0EAB}, + RuneRange{0x0EAD, 0x0EB0}, + RuneRange{0x0EB2, 0x0EB3}, + RuneRange{0x0EBD, 0x0EBD}, + RuneRange{0x0EC0, 0x0EC4}, + RuneRange{0x0EC6, 0x0EC6}, + RuneRange{0x0ED0, 0x0ED9}, + RuneRange{0x0EDC, 0x0EDD}, + RuneRange{0x0F00, 0x0F17}, + RuneRange{0x0F1A, 0x0F34}, + RuneRange{0x0F36, 0x0F36}, + RuneRange{0x0F38, 0x0F38}, + RuneRange{0x0F3E, 0x0F47}, + RuneRange{0x0F49, 0x0F6A}, + RuneRange{0x0F7F, 0x0F7F}, + RuneRange{0x0F85, 0x0F85}, + RuneRange{0x0F88, 0x0F8B}, + RuneRange{0x0FBE, 0x0FC5}, + RuneRange{0x0FC7, 0x0FCC}, + RuneRange{0x0FCF, 0x0FCF}, + RuneRange{0x1000, 0x1021}, + RuneRange{0x1023, 0x1027}, + RuneRange{0x1029, 0x102A}, + RuneRange{0x102C, 0x102C}, + RuneRange{0x1031, 0x1031}, + RuneRange{0x1038, 0x1038}, + RuneRange{0x1040, 0x1057}, + RuneRange{0x10A0, 0x10C5}, + RuneRange{0x10D0, 0x10F8}, + RuneRange{0x10FB, 0x10FB}, + RuneRange{0x1100, 0x1159}, + RuneRange{0x115F, 0x11A2}, + RuneRange{0x11A8, 0x11F9}, + RuneRange{0x1200, 0x1206}, + RuneRange{0x1208, 0x1246}, + RuneRange{0x1248, 0x1248}, + RuneRange{0x124A, 0x124D}, + 
RuneRange{0x1250, 0x1256}, + RuneRange{0x1258, 0x1258}, + RuneRange{0x125A, 0x125D}, + RuneRange{0x1260, 0x1286}, + RuneRange{0x1288, 0x1288}, + RuneRange{0x128A, 0x128D}, + RuneRange{0x1290, 0x12AE}, + RuneRange{0x12B0, 0x12B0}, + RuneRange{0x12B2, 0x12B5}, + RuneRange{0x12B8, 0x12BE}, + RuneRange{0x12C0, 0x12C0}, + RuneRange{0x12C2, 0x12C5}, + RuneRange{0x12C8, 0x12CE}, + RuneRange{0x12D0, 0x12D6}, + RuneRange{0x12D8, 0x12EE}, + RuneRange{0x12F0, 0x130E}, + RuneRange{0x1310, 0x1310}, + RuneRange{0x1312, 0x1315}, + RuneRange{0x1318, 0x131E}, + RuneRange{0x1320, 0x1346}, + RuneRange{0x1348, 0x135A}, + RuneRange{0x1361, 0x137C}, + RuneRange{0x13A0, 0x13F4}, + RuneRange{0x1401, 0x1676}, + RuneRange{0x1681, 0x169A}, + RuneRange{0x16A0, 0x16F0}, + RuneRange{0x1700, 0x170C}, + RuneRange{0x170E, 0x1711}, + RuneRange{0x1720, 0x1731}, + RuneRange{0x1735, 0x1736}, + RuneRange{0x1740, 0x1751}, + RuneRange{0x1760, 0x176C}, + RuneRange{0x176E, 0x1770}, + RuneRange{0x1780, 0x17B6}, + RuneRange{0x17BE, 0x17C5}, + RuneRange{0x17C7, 0x17C8}, + RuneRange{0x17D4, 0x17DA}, + RuneRange{0x17DC, 0x17DC}, + RuneRange{0x17E0, 0x17E9}, + RuneRange{0x1810, 0x1819}, + RuneRange{0x1820, 0x1877}, + RuneRange{0x1880, 0x18A8}, + RuneRange{0x1E00, 0x1E9B}, + RuneRange{0x1EA0, 0x1EF9}, + RuneRange{0x1F00, 0x1F15}, + RuneRange{0x1F18, 0x1F1D}, + RuneRange{0x1F20, 0x1F45}, + RuneRange{0x1F48, 0x1F4D}, + RuneRange{0x1F50, 0x1F57}, + RuneRange{0x1F59, 0x1F59}, + RuneRange{0x1F5B, 0x1F5B}, + RuneRange{0x1F5D, 0x1F5D}, + RuneRange{0x1F5F, 0x1F7D}, + RuneRange{0x1F80, 0x1FB4}, + RuneRange{0x1FB6, 0x1FBC}, + RuneRange{0x1FBE, 0x1FBE}, + RuneRange{0x1FC2, 0x1FC4}, + RuneRange{0x1FC6, 0x1FCC}, + RuneRange{0x1FD0, 0x1FD3}, + RuneRange{0x1FD6, 0x1FDB}, + RuneRange{0x1FE0, 0x1FEC}, + RuneRange{0x1FF2, 0x1FF4}, + RuneRange{0x1FF6, 0x1FFC}, + RuneRange{0x200E, 0x200E}, + RuneRange{0x2071, 0x2071}, + RuneRange{0x207F, 0x207F}, + RuneRange{0x2102, 0x2102}, + RuneRange{0x2107, 0x2107}, + RuneRange{0x210A, 0x2113}, + RuneRange{0x2115, 0x2115}, + RuneRange{0x2119, 0x211D}, + RuneRange{0x2124, 0x2124}, + RuneRange{0x2126, 0x2126}, + RuneRange{0x2128, 0x2128}, + RuneRange{0x212A, 0x212D}, + RuneRange{0x212F, 0x2131}, + RuneRange{0x2133, 0x2139}, + RuneRange{0x213D, 0x213F}, + RuneRange{0x2145, 0x2149}, + RuneRange{0x2160, 0x2183}, + RuneRange{0x2336, 0x237A}, + RuneRange{0x2395, 0x2395}, + RuneRange{0x249C, 0x24E9}, + RuneRange{0x3005, 0x3007}, + RuneRange{0x3021, 0x3029}, + RuneRange{0x3031, 0x3035}, + RuneRange{0x3038, 0x303C}, + RuneRange{0x3041, 0x3096}, + RuneRange{0x309D, 0x309F}, + RuneRange{0x30A1, 0x30FA}, + RuneRange{0x30FC, 0x30FF}, + RuneRange{0x3105, 0x312C}, + RuneRange{0x3131, 0x318E}, + RuneRange{0x3190, 0x31B7}, + RuneRange{0x31F0, 0x321C}, + RuneRange{0x3220, 0x3243}, + RuneRange{0x3260, 0x327B}, + RuneRange{0x327F, 0x32B0}, + RuneRange{0x32C0, 0x32CB}, + RuneRange{0x32D0, 0x32FE}, + RuneRange{0x3300, 0x3376}, + RuneRange{0x337B, 0x33DD}, + RuneRange{0x33E0, 0x33FE}, + RuneRange{0x3400, 0x4DB5}, + RuneRange{0x4E00, 0x9FA5}, + RuneRange{0xA000, 0xA48C}, + RuneRange{0xAC00, 0xD7A3}, + RuneRange{0xD800, 0xFA2D}, + RuneRange{0xFA30, 0xFA6A}, + RuneRange{0xFB00, 0xFB06}, + RuneRange{0xFB13, 0xFB17}, + RuneRange{0xFF21, 0xFF3A}, + RuneRange{0xFF41, 0xFF5A}, + RuneRange{0xFF66, 0xFFBE}, + RuneRange{0xFFC2, 0xFFC7}, + RuneRange{0xFFCA, 0xFFCF}, + RuneRange{0xFFD2, 0xFFD7}, + RuneRange{0xFFDA, 0xFFDC}, + RuneRange{0x10300, 0x1031E}, + RuneRange{0x10320, 0x10323}, + RuneRange{0x10330, 0x1034A}, + RuneRange{0x10400, 0x10425}, + 
RuneRange{0x10428, 0x1044D}, + RuneRange{0x1D000, 0x1D0F5}, + RuneRange{0x1D100, 0x1D126}, + RuneRange{0x1D12A, 0x1D166}, + RuneRange{0x1D16A, 0x1D172}, + RuneRange{0x1D183, 0x1D184}, + RuneRange{0x1D18C, 0x1D1A9}, + RuneRange{0x1D1AE, 0x1D1DD}, + RuneRange{0x1D400, 0x1D454}, + RuneRange{0x1D456, 0x1D49C}, + RuneRange{0x1D49E, 0x1D49F}, + RuneRange{0x1D4A2, 0x1D4A2}, + RuneRange{0x1D4A5, 0x1D4A6}, + RuneRange{0x1D4A9, 0x1D4AC}, + RuneRange{0x1D4AE, 0x1D4B9}, + RuneRange{0x1D4BB, 0x1D4BB}, + RuneRange{0x1D4BD, 0x1D4C0}, + RuneRange{0x1D4C2, 0x1D4C3}, + RuneRange{0x1D4C5, 0x1D505}, + RuneRange{0x1D507, 0x1D50A}, + RuneRange{0x1D50D, 0x1D514}, + RuneRange{0x1D516, 0x1D51C}, + RuneRange{0x1D51E, 0x1D539}, + RuneRange{0x1D53B, 0x1D53E}, + RuneRange{0x1D540, 0x1D544}, + RuneRange{0x1D546, 0x1D546}, + RuneRange{0x1D54A, 0x1D550}, + RuneRange{0x1D552, 0x1D6A3}, + RuneRange{0x1D6A8, 0x1D7C9}, + RuneRange{0x20000, 0x2A6D6}, + RuneRange{0x2F800, 0x2FA1D}, + RuneRange{0xF0000, 0xFFFFD}, + RuneRange{0x100000, 0x10FFFD},
+}
+
+// TableD2 represents RFC-3454 Table D.2.
+var TableD2 Set = tableD2
diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/LICENSE
@@ -0,0 +1,201 @@
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
diff --git a/vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES b/vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES
new file mode 100644
index 0000000..69d6433
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/THIRD-PARTY-NOTICES
@@ -0,0 +1,1336 @@
+--------------------------------------------------------------------- +License notice for gopkg.in/mgo.v2/bson +--------------------------------------------------------------------- + +BSON library for Go + +Copyright (c) 2010-2013 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2.
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------------------------------------------------- +License notice for JSON and CSV code from github.com/golang/go +--------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/davecgh/go-spew +---------------------------------------------------------------------- + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/genny +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/genny/genny +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright © 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/gogen +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/gogen/goimports +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/logger +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/mapi +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/packd +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/packr/v2/packr2 +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright © 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/gobuffalo/syncx +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/golang/snappy +---------------------------------------------------------------------- + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/google/go-cmp +---------------------------------------------------------------------- + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/karrick/godirwalk +---------------------------------------------------------------------- + +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/konsorten/go-windows-terminal-sequences +---------------------------------------------------------------------- + +(The MIT License) + +Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/kr/pretty +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/kr/text +---------------------------------------------------------------------- + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/markbates/oncer +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +---------------------------------------------------------------------- +License notice for github.com/markbates/safe +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/montanaflynn/stats +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/pelletier/go-toml +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/pkg/errors +---------------------------------------------------------------------- + +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/pmezard/go-difflib +---------------------------------------------------------------------- + +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/rogpeppe/go-internal +---------------------------------------------------------------------- + +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for github.com/sirupsen/logrus +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +---------------------------------------------------------------------- +License notice for github.com/stretchr/testify +---------------------------------------------------------------------- + +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/tidwall/pretty +---------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2017 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +---------------------------------------------------------------------- +License notice for github.com/xdg/scram +---------------------------------------------------------------------- + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +---------------------------------------------------------------------- +License notice for github.com/xdg/stringprep +---------------------------------------------------------------------- + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +---------------------------------------------------------------------- +License notice for golang.org/x/crypto +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/net +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/sync +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/sys +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/text +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/tools +---------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +License notice for golang.org/x/tools/cmd/getgo +---------------------------------------------------------------------- + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go new file mode 100644 index 0000000..ae1a87f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -0,0 +1,50 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer +// See THIRD-PARTY-NOTICES for original license terms. + +// +build go1.9 + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, +// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. +// +// Example usage: +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +type D = primitive.D + +// E represents a BSON element for a D. It is usually used inside a D. +type E = primitive.E + +// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not +// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be +// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. +// +// Example usage: +// +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +type M = primitive.M + +// An A is an ordered representation of a BSON array. +// +// Example usage: +// +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go new file mode 100644 index 0000000..bbe7792 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go @@ -0,0 +1,81 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// +build !go1.9 + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "math" + "strconv" + "strings" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// D is an ordered representation of a BSON document. 
This type should be used when the order of the elements matters, +// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. +// +// Example usage: +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +type D []E + +// Map creates a map from the elements of the D. +func (d D) Map() M { + m := make(M, len(d)) + for _, e := range d { + m[e.Key] = e.Value + } + return m +} + +// E represents a BSON element for a D. It is usually used inside a D. +type E struct { + Key string + Value interface{} +} + +// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not +// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be +// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. +// +// Example usage: +// +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +type M map[string]interface{} + +// An A is an ordered representation of a BSON array. +// +// Example usage: +// +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +type A []interface{} + +func formatDouble(f float64) string { + var s string + if math.IsInf(f, 1) { + s = "Infinity" + } else if math.IsInf(f, -1) { + s = "-Infinity" + } else if math.IsNaN(f) { + s = "NaN" + } else { + // Print exactly one decimal place for integers; otherwise, print as many as are necessary to + // perfectly represent it. + s = strconv.FormatFloat(f, 'G', -1, 64) + if !strings.ContainsRune(s, '.') { + s += ".0" + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go new file mode 100644 index 0000000..0ebc9a1 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -0,0 +1,163 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" + +import ( + "fmt" + "reflect" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// Marshaler is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +type Marshaler interface { + MarshalBSON() ([]byte, error) +} + +// ValueMarshaler is an interface implemented by types that can marshal +// themselves into a BSON value as bytes. The type must be the valid type for +// the bytes returned. The bytes and byte type together must be valid if the +// error is nil. +type ValueMarshaler interface { + MarshalBSONValue() (bsontype.Type, []byte, error) +} + +// Unmarshaler is an interface implemented by types that can unmarshal a BSON +// document representation of themselves. The BSON bytes can be assumed to be +// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data +// after returning. +type Unmarshaler interface { + UnmarshalBSON([]byte) error +} + +// ValueUnmarshaler is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid.
UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +type ValueUnmarshaler interface { + UnmarshalBSONValue(bsontype.Type, []byte) error +} + +// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be +// encoded by the ValueEncoder. +type ValueEncoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vee ValueEncoderError) Error() string { + typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) + for _, t := range vee.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vee.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vee.Received.Kind().String() + if vee.Received.IsValid() { + received = vee.Received.Type().String() + } + return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) +} + +// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be +// decoded by the ValueDecoder. +type ValueDecoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vde ValueDecoderError) Error() string { + typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) + for _, t := range vde.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vde.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vde.Received.Kind().String() + if vde.Received.IsValid() { + received = vde.Received.Type().String() + } + return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) +} + +// EncodeContext is the contextual information required for a Codec to encode a +// value. +type EncodeContext struct { + *Registry + MinSize bool +} + +// DecodeContext is the contextual information required for a Codec to decode a +// value. +type DecodeContext struct { + *Registry + Truncate bool + // Ancestor is the type of a containing document. This is mainly used to determine what type + // should be used when decoding an embedded document into an empty interface. For example, if + // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface + // will be decoded into a bson.M. + Ancestor reflect.Type +} + +// ValueCodec is the interface that groups the methods to encode and decode +// values. +type ValueCodec interface { + ValueEncoder + ValueDecoder +} + +// ValueEncoder is the interface implemented by types that can handle the encoding of a value. +type ValueEncoder interface { + EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error +} + +// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be +// used as a ValueEncoder. +type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error + +// EncodeValue implements the ValueEncoder interface. +func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + return fn(ec, vw, val) +} + +// ValueDecoder is the interface implemented by types that can handle the decoding of a value. 
+type ValueDecoder interface { + DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error +} + +// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be +// used as a ValueDecoder. +type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error + +// DecodeValue implements the ValueDecoder interface. +func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + return fn(dc, vr, val) +} + +// CodecZeroer is the interface implemented by Codecs that can also determine if +// a value of the type that would be encoded is zero. +type CodecZeroer interface { + IsTypeZero(interface{}) bool +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go new file mode 100644 index 0000000..9eed911 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -0,0 +1,87 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultByteSliceCodec = NewByteSliceCodec() + +// ByteSliceCodec is the Codec used for []byte values. +type ByteSliceCodec struct { + EncodeNilAsEmpty bool +} + +var _ ValueCodec = &ByteSliceCodec{} + +// NewByteSliceCodec returns a ByteSliceCodec with options opts. +func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { + byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) + codec := ByteSliceCodec{} + if byteSliceOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for []byte. +func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tByteSlice { + return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + if val.IsNil() && !bsc.EncodeNilAsEmpty { + return vw.WriteNull() + } + return vw.WriteBinary(val.Interface().([]byte)) +} + +// DecodeValue is the ValueDecoder for []byte.
+func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + var data []byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return err + } + data = []byte(str) + case bsontype.Symbol: + sym, err := vr.ReadSymbol() + if err != nil { + return err + } + data = []byte(sym) + case bsontype.Binary: + var subtype byte + data, subtype, err = vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a []byte", vrType) + } + + val.Set(reflect.ValueOf(data)) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go new file mode 100644 index 0000000..cb8180f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go @@ -0,0 +1,63 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" +) + +// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. +type condAddrEncoder struct { + canAddrEnc ValueEncoder + elseEnc ValueEncoder +} + +var _ ValueEncoder = (*condAddrEncoder)(nil) + +// newCondAddrEncoder returns a condAddrEncoder. +func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { + encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return &encoder +} + +// EncodeValue is the ValueEncoderFunc for a value that may be addressable. +func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.CanAddr() { + return cae.canAddrEnc.EncodeValue(ec, vw, val) + } + if cae.elseEnc != nil { + return cae.elseEnc.EncodeValue(ec, vw, val) + } + return ErrNoEncoder{Type: val.Type()} +} + +// condAddrDecoder is the decoder used when a pointer to the value has a decoder. +type condAddrDecoder struct { + canAddrDec ValueDecoder + elseDec ValueDecoder +} + +var _ ValueDecoder = (*condAddrDecoder)(nil) + +// newCondAddrDecoder returns a condAddrDecoder. +func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { + decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} + return &decoder +} + +// DecodeValue is the ValueDecoderFunc for a value that may be addressable.
+func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if val.CanAddr() { + return cad.canAddrDec.DecodeValue(dc, vr, val) + } + if cad.elseDec != nil { + return cad.elseDec.DecodeValue(dc, vr, val) + } + return ErrNoDecoder{Type: val.Type()} +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go new file mode 100644 index 0000000..52d2365 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -0,0 +1,1249 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueDecoders DefaultValueDecoders + +// DefaultValueDecoders is a namespace type for the default ValueDecoders used +// when creating a registry. +type DefaultValueDecoders struct{} + +// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with +// the provided RegistryBuilder. +// +// There is no support for decoding map[string]interface{} because there is no decoder for +// interface{}, so users must either register this decoder themselves or use the +// EmptyInterfaceDecoder available in the bson package. +func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) + } + + rb. + RegisterTypeDecoder(tBinary, ValueDecoderFunc(dvd.BinaryDecodeValue)). + RegisterTypeDecoder(tUndefined, ValueDecoderFunc(dvd.UndefinedDecodeValue)). + RegisterTypeDecoder(tDateTime, ValueDecoderFunc(dvd.DateTimeDecodeValue)). + RegisterTypeDecoder(tNull, ValueDecoderFunc(dvd.NullDecodeValue)). + RegisterTypeDecoder(tRegex, ValueDecoderFunc(dvd.RegexDecodeValue)). + RegisterTypeDecoder(tDBPointer, ValueDecoderFunc(dvd.DBPointerDecodeValue)). + RegisterTypeDecoder(tTimestamp, ValueDecoderFunc(dvd.TimestampDecodeValue)). + RegisterTypeDecoder(tMinKey, ValueDecoderFunc(dvd.MinKeyDecodeValue)). + RegisterTypeDecoder(tMaxKey, ValueDecoderFunc(dvd.MaxKeyDecodeValue)). + RegisterTypeDecoder(tJavaScript, ValueDecoderFunc(dvd.JavaScriptDecodeValue)). + RegisterTypeDecoder(tSymbol, ValueDecoderFunc(dvd.SymbolDecodeValue)). + RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeDecoder(tTime, defaultTimeCodec). + RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeDecoder(tOID, ValueDecoderFunc(dvd.ObjectIDDecodeValue)). + RegisterTypeDecoder(tDecimal, ValueDecoderFunc(dvd.Decimal128DecodeValue)). + RegisterTypeDecoder(tJSONNumber, ValueDecoderFunc(dvd.JSONNumberDecodeValue)). + RegisterTypeDecoder(tURL, ValueDecoderFunc(dvd.URLDecodeValue)). + RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). + RegisterTypeDecoder(tCodeWithScope, ValueDecoderFunc(dvd.CodeWithScopeDecodeValue)). + RegisterDefaultDecoder(reflect.Bool, ValueDecoderFunc(dvd.BooleanDecodeValue)).
+ RegisterDefaultDecoder(reflect.Int, ValueDecoderFunc(dvd.IntDecodeValue)). + RegisterDefaultDecoder(reflect.Int8, ValueDecoderFunc(dvd.IntDecodeValue)). + RegisterDefaultDecoder(reflect.Int16, ValueDecoderFunc(dvd.IntDecodeValue)). + RegisterDefaultDecoder(reflect.Int32, ValueDecoderFunc(dvd.IntDecodeValue)). + RegisterDefaultDecoder(reflect.Int64, ValueDecoderFunc(dvd.IntDecodeValue)). + RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Float32, ValueDecoderFunc(dvd.FloatDecodeValue)). + RegisterDefaultDecoder(reflect.Float64, ValueDecoderFunc(dvd.FloatDecodeValue)). + RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). + RegisterDefaultDecoder(reflect.Map, defaultMapCodec). + RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultDecoder(reflect.String, defaultStringCodec). + RegisterDefaultDecoder(reflect.Struct, defaultStructCodec). + RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). + RegisterTypeMapEntry(bsontype.Double, tFloat64). + RegisterTypeMapEntry(bsontype.String, tString). + RegisterTypeMapEntry(bsontype.Array, tA). + RegisterTypeMapEntry(bsontype.Binary, tBinary). + RegisterTypeMapEntry(bsontype.Undefined, tUndefined). + RegisterTypeMapEntry(bsontype.ObjectID, tOID). + RegisterTypeMapEntry(bsontype.Boolean, tBool). + RegisterTypeMapEntry(bsontype.DateTime, tDateTime). + RegisterTypeMapEntry(bsontype.Regex, tRegex). + RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). + RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). + RegisterTypeMapEntry(bsontype.Symbol, tSymbol). + RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). + RegisterTypeMapEntry(bsontype.Int32, tInt32). + RegisterTypeMapEntry(bsontype.Int64, tInt64). + RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). + RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). + RegisterTypeMapEntry(bsontype.MinKey, tMinKey). + RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). + RegisterTypeMapEntry(bsontype.Type(0), tD). + RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). + RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). + RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) +} + +// BooleanDecodeValue is the ValueDecoderFunc for bool types. 
+func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { + return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + + var b bool + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + b = (i32 != 0) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return err + } + b = (i64 != 0) + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + b = (f64 != 0) + case bsontype.Boolean: + b, err = vr.ReadBoolean() + if err != nil { + return err + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a boolean", vrType) + } + val.SetBool(b) + return nil +} + +// IntDecodeValue is the ValueDecoderFunc for int types. +func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("IntDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch val.Kind() { + case reflect.Int8: + if i64 < math.MinInt8 || i64 > math.MaxInt8 { + return fmt.Errorf("%d overflows int8", i64) + } + case reflect.Int16: + if i64 < math.MinInt16 || i64 > math.MaxInt16 { + return fmt.Errorf("%d overflows int16", i64) + } + case reflect.Int32: + if i64 < math.MinInt32 || i64 > math.MaxInt32 { + return fmt.Errorf("%d overflows int32", i64) + } + case reflect.Int64: + case reflect.Int: + if int64(int(i64)) != i64 { // Can we fit this inside of an int + return fmt.Errorf("%d overflows int", i64) + } + default: + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + val.SetInt(i64) + return nil +} + +// UintDecodeValue is the ValueDecoderFunc for uint types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use UIntCodec.DecodeValue instead. 
+func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows uint", i64) + } + default: + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} + +// FloatDecodeValue is the ValueDecoderFunc for float types. +func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: val, + } + } + + var f float64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + f = float64(i32) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return err + } + f = float64(i64) + case bsontype.Double: + f, err = vr.ReadDouble() + if err != nil { + return err + } + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + f = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) + } + + switch val.Kind() { + case reflect.Float32: + if !ec.Truncate && float64(float32(f)) != f { + return errors.New("FloatDecodeValue can only convert float64 to float32 when truncation is allowed") + } + case reflect.Float64: + default: + return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} + } + + val.SetFloat(f) + return nil +} + +// StringDecodeValue is the ValueDecoderFunc for string types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. 
Use StringCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var str string + var err error + switch vr.Type() { + // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + val.SetString(str) + return nil +} + +// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +func (DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJavaScript { + return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + var js string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.JavaScript: + js, err = vr.ReadJavascript() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) + } + + if err != nil { + return err + } + val.SetString(js) + return nil +} + +// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +func (DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tSymbol { + return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + var symbol string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + symbol, err = vr.ReadString() + if err != nil { + return err + } + case bsontype.Symbol: + symbol, err = vr.ReadSymbol() + if err != nil { + return err + } + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("SymbolDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + symbol = string(data) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) + } + + val.SetString(symbol) + return nil +} + +// BinaryDecodeValue is the ValueDecoderFunc for Binary. +func (DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tBinary { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + + var data []byte + var subtype byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Binary: + data, subtype, err = vr.ReadBinary() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a Binary", vrType) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data})) + return nil +} + +// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. 
+func (DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tUndefined { + return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.Undefined{})) + return nil +} + +// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tOID { + return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} + } + + var oid primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.ObjectID: + oid, err = vr.ReadObjectID() + if err != nil { + return err + } + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return err + } + if len(str) != 12 { + return fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) + } + byteArr := []byte(str) + copy(oid[:], byteArr) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into an ObjectID", vrType) + } + + val.Set(reflect.ValueOf(oid)) + return nil +} + +// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +func (DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDateTime { + return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + var dt int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err = vr.ReadDateTime() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a DateTime", vrType) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.DateTime(dt))) + return nil +} + +// NullDecodeValue is the ValueDecoderFunc for Null. +func (DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tNull { + return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + if vrType := vr.Type(); vrType != bsontype.Null { + return fmt.Errorf("cannot decode %v into a Null", vrType) + } + + val.Set(reflect.ValueOf(primitive.Null{})) + return vr.ReadNull() +} + +// RegexDecodeValue is the ValueDecoderFunc for Regex. 
+func (DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tRegex { + return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + var pattern, options string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Regex: + pattern, options, err = vr.ReadRegex() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a Regex", vrType) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options})) + return nil +} + +// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +func (DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDBPointer { + return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + var ns string + var pointer primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DBPointer: + ns, pointer, err = vr.ReadDBPointer() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a DBPointer", vrType) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer})) + return nil +} + +// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +func (DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTimestamp { + return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + var t, incr uint32 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Timestamp: + t, incr, err = vr.ReadTimestamp() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a Timestamp", vrType) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.Timestamp{T: t, I: incr})) + return nil +} + +// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +func (DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMinKey { + return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MinKey: + err = vr.ReadMinKey() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.MinKey{})) + return nil +} + +// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +func (DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMaxKey { + return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MaxKey: + err = vr.ReadMaxKey() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.MaxKey{})) + return nil +} + +// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. 
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDecimal { + return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + + var d128 primitive.Decimal128 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Decimal128: + d128, err = vr.ReadDecimal128() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) + } + + if err != nil { + return err + } + val.Set(reflect.ValueOf(d128)) + return err +} + +// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJSONNumber { + return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + val.Set(reflect.ValueOf(json.Number(strconv.FormatFloat(f64, 'f', -1, 64)))) + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(int64(i32), 10)))) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return err + } + val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(i64, 10)))) + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + val.SetString("") + default: + return fmt.Errorf("cannot decode %v into a json.Number", vrType) + } + + return nil +} + +// URLDecodeValue is the ValueDecoderFunc for url.URL. +func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tURL { + return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return err + } + + parsedURL, err := url.Parse(str) + if err != nil { + return err + } + val.Set(reflect.ValueOf(parsedURL).Elem()) + return nil + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + val.Set(reflect.ValueOf(url.URL{})) + return nil + default: + return fmt.Errorf("cannot decode %v into a *url.URL", vrType) + } +} + +// TimeDecodeValue is the ValueDecoderFunc for time.Time. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use TimeCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.DateTime { + return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) + } + + dt, err := vr.ReadDateTime() + if err != nil { + return err + } + + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) + return nil +} + +// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use ByteSliceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { + return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) + } + + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + } + + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != 0x00 { + return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) + } + + val.Set(reflect.ValueOf(data)) + return nil +} + +// MapDecodeValue is the ValueDecoderFunc for map[string]* types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use MapCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) + } + return nil +} + +// ArrayDecodeValue is the ValueDecoderFunc for array types.
+func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if len(data) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) + } + + for idx, elem := range data { + val.Index(idx).Set(reflect.ValueOf(elem)) + } + return nil + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into an array", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if len(elems) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) + } + + for idx, elem := range elems { + val.Index(idx).Set(elem) + } + + return nil +} + +// SliceDecodeValue is the ValueDecoderFunc for slice types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use SliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vr.Type() { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + default: + return fmt.Errorf("cannot decode %v into a slice", vr.Type()) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} + +// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + if !val.Type().Implements(tValueUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") + errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] + if !errVal.IsNil() { + return errVal.Interface().(error) + } + return nil +} + +// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + if !val.Type().Implements(tUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") + errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] + if !errVal.IsNil() { + return errVal.Interface().(error) + } + return nil +} + +// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use EmptyInterfaceCodec.DecodeValue instead.
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := dc.LookupTypeMapEntry(vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.EmbeddedDocument: + if dc.Ancestor != nil { + rtype = dc.Ancestor + break + } + rtype = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. +func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreDocument { + return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + + cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) + val.Set(reflect.ValueOf(cdoc)) + return err +} + +func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { + elems := make([]reflect.Value, 0) + + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + + eType := val.Type().Elem() + + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return nil, err + } + + for { + vr, err := ar.ReadValue() + if err == bsonrw.ErrEOA { + break + } + if err != nil { + return nil, err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return nil, err + } + elems = append(elems, elem) + } + + return elems, nil +} + +// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. 
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCodeWithScope { + return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.CodeWithScope: + code, dr, err := vr.ReadCodeWithScope() + if err != nil { + return err + } + + scope := reflect.New(tD).Elem() + elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) + if err != nil { + return err + } + + scope.Set(reflect.MakeSlice(tD, 0, len(elems))) + scope.Set(reflect.Append(scope, elems...)) + + val.Set(reflect.ValueOf(primitive.CodeWithScope{ + Code: primitive.JavaScript(code), + Scope: scope.Interface().(primitive.D), + })) + return nil + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.CodeWithScope{})) + return nil + default: + return fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) + } +} + +func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return nil, err + } + + return dvd.decodeElemsFromDocumentReader(dc, dr) +} + +func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return nil, err + } + + elems := make([]reflect.Value, 0) + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return nil, err + } + + val := reflect.New(tEmpty).Elem() + err = decoder.DecodeValue(dc, vr, val) + if err != nil { + return nil, err + } + + elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) + } + + return elems, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go new file mode 100644 index 0000000..08078b3 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -0,0 +1,771 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueEncoders DefaultValueEncoders + +var bvwPool = bsonrw.NewBSONValueWriterPool() + +var errInvalidValue = errors.New("cannot encode invalid element") + +var sliceWriterPool = sync.Pool{ + New: func() interface{} { + sw := make(bsonrw.SliceWriter, 0, 0) + return &sw + }, +} + +func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { + vw, err := dw.WriteDocumentElement(e.Key) + if err != nil { + return err + } + + if e.Value == nil { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) + if err != nil { + return err + } + return nil +} + +// DefaultValueEncoders is a namespace type for the default ValueEncoders used +// when creating a registry. +type DefaultValueEncoders struct{} + +// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with +// the provided RegistryBuilder. +func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) + } + rb. + RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeEncoder(tTime, defaultTimeCodec). + RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). + RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). + RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). + RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). + RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). + RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). + RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). + RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). + RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). + RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). + RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). + RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). + RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). + RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). + RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). + RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). + RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). + RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). + RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). 
+ RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). + RegisterDefaultEncoder(reflect.Map, defaultMapCodec). + RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultEncoder(reflect.String, defaultStringCodec). + RegisterDefaultEncoder(reflect.Struct, defaultStructCodec). + RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). + RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). + RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). + RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) +} + +// BooleanEncodeValue is the ValueEncoderFunc for bool types. +func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Bool { + return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + return vw.WriteBoolean(val.Bool()) +} + +func fitsIn32Bits(i int64) bool { + return math.MinInt32 <= i && i <= math.MaxInt32 +} + +// IntEncodeValue is the ValueEncoderFunc for int types. +func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + return vw.WriteInt32(int32(val.Int())) + case reflect.Int: + i64 := val.Int() + if fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + case reflect.Int64: + i64 := val.Int() + if ec.MinSize && fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + } + + return ValueEncoderError{ + Name: "IntEncodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } +} + +// UintEncodeValue is the ValueEncoderFunc for uint types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use UIntCodec.EncodeValue instead. +func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + if ec.MinSize && u64 <= math.MaxInt32 { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +// FloatEncodeValue is the ValueEncoderFunc for float types. 
+func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Float32, reflect.Float64: + return vw.WriteDouble(val.Float()) + } + + return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} +} + +// StringEncodeValue is the ValueEncoderFunc for string types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use StringCodec.EncodeValue instead. +func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tOID { + return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} + } + return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) +} + +// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. +func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDecimal { + return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) +} + +// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. +func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJSONNumber { + return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + jsnum := val.Interface().(json.Number) + + // Attempt int first, then float64 + if i64, err := jsnum.Int64(); err == nil { + return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) + } + + f64, err := jsnum.Float64() + if err != nil { + return err + } + + return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) +} + +// URLEncodeValue is the ValueEncoderFunc for url.URL. +func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tURL { + return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} + } + u := val.Interface().(url.URL) + return vw.WriteString(u.String()) +} + +// TimeEncodeValue is the ValueEncoderFunc for time.TIme. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use TimeCodec.EncodeValue instead. +func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTime { + return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} + } + tt := val.Interface().(time.Time) + return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) +} + +// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. 
Use ByteSliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+// This method is deprecated and does not have any stability guarantees. It may be removed in the
+// future. Use MapCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return dve.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists, this is mainly used for inline maps in the
+// struct codec.
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		if collisionFn != nil && collisionFn(key.String()) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+
+		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+		if lookupErr != nil && lookupErr != errInvalidValue {
+			return lookupErr
+		}
+
+		vw, err := dw.WriteDocumentElement(key.String())
+		if err != nil {
+			return err
+		}
+
+		if lookupErr == errInvalidValue {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if enc, ok := currEncoder.(ValueEncoder); ok {
+			err = enc.EncodeValue(ec, vw, currVal)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		err = encoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for array types.
+func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+ if val.Type().Elem() == tE { + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + e := val.Index(idx).Interface().(primitive.E) + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. + if val.Type().Elem() == tByte { + var byteSlice []byte + for idx := 0; idx < val.Len(); idx++ { + byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) + } + return vw.WriteBinary(byteSlice) + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// SliceEncodeValue is the ValueEncoderFunc for slice types. +// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use SliceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { + if origEncoder != nil || (currVal.Kind() != reflect.Interface) { + return origEncoder, currVal, nil + } + currVal = currVal.Elem() + if !currVal.IsValid() { + return nil, currVal, errInvalidValue + } + currEncoder, err := ec.LookupEncoder(currVal.Type()) + + return currEncoder, currVal, err +} + +// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. 
+// This method is deprecated and does not have any stability guarantees. It may be removed in the +// future. Use EmptyInterfaceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. +func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement ValueMarshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + case val.Type().Implements(tValueMarshaler): + // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tValueMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + } + + fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") + returns := fn.Call(nil) + if !returns[2].IsNil() { + return returns[2].Interface().(error) + } + t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) + return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) +} + +// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. +func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Marshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + case val.Type().Implements(tMarshaler): + // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + } + + fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") + returns := fn.Call(nil) + if !returns[1].IsNil() { + return returns[1].Interface().(error) + } + data := returns[0].Interface().([]byte) + return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) +} + +// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. 
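+
+// Illustrative sketch (editor's example, not upstream driver code): a type
+// whose value or pointer implements bson.ValueMarshaler is encoded through
+// the hook above. "Celsius" is a hypothetical type; bsoncore.AppendString
+// and bsontype.String are real driver APIs:
+//
+//	type Celsius float64
+//
+//	func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
+//		// store the temperature as a BSON string such as "21.5C"
+//		return bsontype.String, bsoncore.AppendString(nil, fmt.Sprintf("%.1fC", float64(c))), nil
+//	}
+//
+//	// bson.Marshal(bson.D{{Key: "temp", Value: Celsius(21.5)}}) then yields
+//	// {"temp": "21.5C"} via ValueMarshalerEncodeValue.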
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Proxy + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + case val.Type().Implements(tProxy): + // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tProxy) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + } + + fn := val.Convert(tProxy).MethodByName("ProxyBSON") + returns := fn.Call(nil) + if !returns[1].IsNil() { + return returns[1].Interface().(error) + } + data := returns[0] + var encoder ValueEncoder + var err error + if data.Elem().IsValid() { + encoder, err = ec.LookupEncoder(data.Elem().Type()) + } else { + encoder, err = ec.LookupEncoder(nil) + } + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, data.Elem()) +} + +// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. +func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJavaScript { + return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + return vw.WriteJavascript(val.String()) +} + +// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. +func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tSymbol { + return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + return vw.WriteSymbol(val.String()) +} + +// BinaryEncodeValue is the ValueEncoderFunc for Binary. +func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tBinary { + return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + b := val.Interface().(primitive.Binary) + + return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) +} + +// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. +func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tUndefined { + return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + return vw.WriteUndefined() +} + +// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. +func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDateTime { + return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + return vw.WriteDateTime(val.Int()) +} + +// NullEncodeValue is the ValueEncoderFunc for Null. +func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tNull { + return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + return vw.WriteNull() +} + +// RegexEncodeValue is the ValueEncoderFunc for Regex. 
+func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tRegex { + return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + regex := val.Interface().(primitive.Regex) + + return vw.WriteRegex(regex.Pattern, regex.Options) +} + +// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. +func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDBPointer { + return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + dbp := val.Interface().(primitive.DBPointer) + + return vw.WriteDBPointer(dbp.DB, dbp.Pointer) +} + +// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. +func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTimestamp { + return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + ts := val.Interface().(primitive.Timestamp) + + return vw.WriteTimestamp(ts.T, ts.I) +} + +// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. +func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMinKey { + return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + return vw.WriteMinKey() +} + +// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. +func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMaxKey { + return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + return vw.WriteMaxKey() +} + +// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. +func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreDocument { + return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + cdoc := val.Interface().(bsoncore.Document) + + return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) +} + +// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. 
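+
+// Illustrative sketch (editor's example, not upstream driver code): encoding
+// a primitive.CodeWithScope writes the JavaScript code plus its scope
+// document through the function below:
+//
+//	cws := primitive.CodeWithScope{
+//		Code:  primitive.JavaScript("function() { return x; }"),
+//		Scope: bson.D{{Key: "x", Value: 1}},
+//	}
+//	raw, err := bson.Marshal(bson.D{{Key: "f", Value: cws}})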
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCodeWithScope {
+		return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	cws := val.Interface().(primitive.CodeWithScope)
+
+	dw, err := vw.WriteCodeWithScope(string(cws.Code))
+	if err != nil {
+		return err
+	}
+
+	sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
+	defer sliceWriterPool.Put(sw)
+	*sw = (*sw)[:0]
+
+	scopeVW := bvwPool.Get(sw)
+	defer bvwPool.Put(scopeVW)
+
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
+	if err != nil {
+		return err
+	}
+	return dw.WriteDocumentEnd()
+}
+
+// isImplementationNil returns true if val is a nil pointer and inter is implemented on a concrete type
+func isImplementationNil(val reflect.Value, inter reflect.Type) bool {
+	vt := val.Type()
+	for vt.Kind() == reflect.Ptr {
+		vt = vt.Elem()
+	}
+	return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
new file mode 100644
index 0000000..c1e20f9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
@@ -0,0 +1,84 @@
+// Package bsoncodec provides a system for encoding values to BSON representations and decoding
+// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
+// BSON representations. The types in this package enable a flexible system for handling this
+// encoding and decoding.
+//
+// The codec system is composed of two parts:
+//
+// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
+// representations.
+//
+// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
+// retrieving them.
+//
+// ValueEncoders and ValueDecoders
+//
+// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
+// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
+// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
+// is provided to allow use of a function with the correct signature as a ValueEncoder. An
+// EncodeContext instance is provided to allow implementations to look up further ValueEncoders and
+// to provide configuration information.
+//
+// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
+// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
+// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
+// instance is provided and serves similar functionality to the EncodeContext.
+//
+// Registry and RegistryBuilder
+//
+// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
+// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a
+// RegistryBuilder, which handles four main types of codecs:
+//
+// 1.
Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. +// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the +// interface, but not for values with concrete types that implement the interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. +// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values +// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will +// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete +// type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when +// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 +// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would +// change the behavior so these values decode as Go int instances instead: +// +// intType := reflect.TypeOf(int(0)) +// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// +// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder +// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the +// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. +// These methods should be used to change the behavior for all values for a specific kind. +// +// Registry Lookup Procedure +// +// When looking up an encoder in a Registry, the precedence rules are as follows: +// +// 1. A type encoder registered for the exact type of the value. +// +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the +// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first +// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined +// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those +// will take precedence over any new hooks. +// +// 3. A kind encoder registered for the value's kind. +// +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence +// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is +// found. +// +// DefaultValueEncoders and DefaultValueDecoders +// +// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and +// ValueDecoders for handling a wide range of Go types, including all of the types within the +// primitive package. To make registering these codecs easier, a helper method on each type is +// provided. 
For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
+// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; this method also
+// handles registering type map entries for each BSON type.
+package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
new file mode 100644
index 0000000..c215ec3
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
@@ -0,0 +1,125 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
+
+// EmptyInterfaceCodec is the Codec used for interface{} values.
+type EmptyInterfaceCodec struct {
+	DecodeBinaryAsSlice bool
+}
+
+var _ ValueCodec = &EmptyInterfaceCodec{}
+
+// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
+func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
+	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
+
+	codec := EmptyInterfaceCodec{}
+	if interfaceOpt.DecodeBinaryAsSlice != nil {
+		codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
+	isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
+	if isDocument && dc.Ancestor != nil {
+		// Using ancestor information rather than looking up the type map entry forces consistent decoding.
+		// If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
+		// has been registered.
+		return dc.Ancestor, nil
+	}
+
+	rtype, err := dc.LookupTypeMapEntry(valueType)
+	if err == nil {
+		return rtype, nil
+	}
+
+	if isDocument {
+		// For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument,
+		// depending on the original valueType.
+		var lookupType bsontype.Type
+		switch valueType {
+		case bsontype.Type(0):
+			lookupType = bsontype.EmbeddedDocument
+		case bsontype.EmbeddedDocument:
+			lookupType = bsontype.Type(0)
+		}
+
+		rtype, err = dc.LookupTypeMapEntry(lookupType)
+		if err == nil {
+			return rtype, nil
+		}
+	}
+
+	return nil, err
+}
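+
+// Illustrative sketch (editor's example, not upstream driver code): assuming
+// the bsonoptions package exposes EmptyInterfaceCodec().SetDecodeBinaryAsSlice
+// (as the options merge above suggests), generic BSON binary values can be
+// decoded into interface{} as []byte instead of primitive.Binary:
+//
+//	codec := NewEmptyInterfaceCodec(bsonoptions.EmptyInterfaceCodec().SetDecodeBinaryAsSlice(true))
+//	rb := NewRegistryBuilder()
+//	defaultValueEncoders.RegisterDefaultEncoders(rb)
+//	defaultValueDecoders.RegisterDefaultDecoders(rb)
+//	rb.RegisterTypeDecoder(tEmpty, codec)
+//	reg := rb.Build()
+
+// DecodeValue is the ValueDecoderFunc for interface{}.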
+func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + if eic.DecodeBinaryAsSlice && rtype == tBinary { + binElem := elem.Interface().(primitive.Binary) + if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { + elem = reflect.ValueOf(binElem.Data) + } + } + + val.Set(elem) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go new file mode 100644 index 0000000..85ae9c6 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -0,0 +1,206 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + "strconv" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultMapCodec = NewMapCodec() + +// MapCodec is the Codec used for map values. +type MapCodec struct { + DecodeZerosMap bool + EncodeNilAsEmpty bool +} + +var _ ValueCodec = &MapCodec{} + +// NewMapCodec returns a MapCodec with options opts. +func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { + mapOpt := bsonoptions.MergeMapCodecOptions(opts...) + + codec := MapCodec{} + if mapOpt.DecodeZerosMap != nil { + codec.DecodeZerosMap = *mapOpt.DecodeZerosMap + } + if mapOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for map[*]* types. +func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Map { + return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + if val.IsNil() && !mc.EncodeNilAsEmpty { + // If we have a nil map but we can't WriteNull, that means we're probably trying to encode + // to a TopLevel document. We can't currently tell if this is what actually happened, but if + // there's a deeper underlying problem, the error will also be returned from WriteDocument, + // so just continue. The operations on a map reflection value are valid, so we can call + // MapKeys within mapEncodeValue without a problem. + err := vw.WriteNull() + if err == nil { + return nil + } + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + return mc.mapEncodeValue(ec, dw, val, nil) +} + +// mapEncodeValue handles encoding of the values of a map. The collisionFn returns +// true if the provided key exists, this is mainly used for inline maps in the +// struct codec. 
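+
+// Illustrative sketch (editor's example, not upstream driver code): the
+// options above are normally supplied through the bsonoptions package, e.g.
+// to wipe existing keys from a destination map before decoding:
+//
+//	mapCodec := NewMapCodec(bsonoptions.MapCodec().SetDecodeZerosMap(true))
+//	rb := NewRegistryBuilder()
+//	defaultValueEncoders.RegisterDefaultEncoders(rb)
+//	defaultValueDecoders.RegisterDefaultDecoders(rb)
+//	rb.RegisterDefaultDecoder(reflect.Map, mapCodec)
+//	m := map[string]int{"stale": 1}
+//	// decoding into m with this registry clears "stale" first
+//	// (see DecodeValue below).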
+func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + keyStr := fmt.Sprint(key) + if collisionFn != nil && collisionFn(keyStr) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + + currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := dw.WriteDocumentElement(keyStr) + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + if enc, ok := currEncoder.(ValueEncoder); ok { + err = enc.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + continue + } + err = encoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// DecodeValue is the ValueDecoder for map[string/decimal]* types. +func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + if val.Len() > 0 && mc.DecodeZerosMap { + clearMap(val) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + keyKind := keyType.Kind() + + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + k := reflect.ValueOf(key) + if keyType != tString { + switch keyKind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + parsed, err := strconv.ParseFloat(k.String(), 64) + if err != nil { + return fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyKind, err) + } + k = reflect.ValueOf(parsed) + case reflect.String: // if keyType wraps string + default: + return fmt.Errorf("BSON map must have string or decimal keys. Got:%v", val.Type()) + } + + k = k.Convert(keyType) + } + + elem := reflect.New(eType).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.SetMapIndex(k, elem) + } + return nil +} + +func clearMap(m reflect.Value) { + var none reflect.Value + for _, k := range m.MapKeys() { + m.SetMapIndex(k, none) + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go new file mode 100644 index 0000000..fbd9f0a --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go @@ -0,0 +1,65 @@ +// Copyright (C) MongoDB, Inc. 
2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition in a
+// ValueReader or ValueWriter state machine occurs.
+type TransitionError struct {
+	parent      mode
+	current     mode
+	destination mode
+}
+
+func (te TransitionError) Error() string {
+	if te.destination == mode(0) {
+		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+	}
+	if te.parent == mode(0) {
+		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+	}
+	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 0000000..0d9502f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,110 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var defaultPointerCodec = &PointerCodec{
+	ecache: make(map[reflect.Type]ValueEncoder),
+	dcache: make(map[reflect.Type]ValueDecoder),
+}
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+type PointerCodec struct {
+	ecache map[reflect.Type]ValueEncoder
+	dcache map[reflect.Type]ValueDecoder
+	l      sync.RWMutex
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+func NewPointerCodec() *PointerCodec {
+	return &PointerCodec{
+		ecache: make(map[reflect.Type]ValueEncoder),
+		dcache: make(map[reflect.Type]ValueDecoder),
+	}
+}
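+
+// Illustrative sketch (editor's example, not upstream driver code): nil
+// pointers round-trip as BSON null, non-nil pointers as their pointed-to
+// value; "record" is a hypothetical type:
+//
+//	type record struct {
+//		Count *int `bson:"count"`
+//	}
+//	n := 5
+//	raw, _ := bson.Marshal(record{Count: &n}) // {"count": 5}
+//	raw2, _ := bson.Marshal(record{})         // {"count": null}
+//	var out record
+//	_ = bson.Unmarshal(raw2, &out)            // out.Count == nil
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.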
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.Ptr {
+		if !val.IsValid() {
+			return vw.WriteNull()
+		}
+		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	pc.l.RLock()
+	enc, ok := pc.ecache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if enc == nil {
+			return ErrNoEncoder{Type: val.Type()}
+		}
+		return enc.EncodeValue(ec, vw, val.Elem())
+	}
+
+	enc, err := ec.LookupEncoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.ecache[val.Type()] = enc
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	pc.l.RLock()
+	dec, ok := pc.dcache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if dec == nil {
+			return ErrNoDecoder{Type: val.Type()}
+		}
+		return dec.DecodeValue(dc, vr, val.Elem())
+	}
+
+	dec, err := dc.LookupDecoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.dcache[val.Type()] = dec
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 0000000..4cf2b01
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
new file mode 100644
index 0000000..02b63bb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,472 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
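+
+// Illustrative sketch (editor's example, not upstream driver code): a type
+// that substitutes another value for itself during encoding; "secret" and
+// its redacted form are hypothetical:
+//
+//	type secret struct{ plaintext string }
+//
+//	func (s secret) ProxyBSON() (interface{}, error) {
+//		// the substitute value is what actually gets encoded
+//		return bson.D{{Key: "redacted", Value: true}}, nil
+//	}
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.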
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+type ErrNoTypeMapEntry struct {
+	Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+	return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+var ErrNotInterface = errors.New("The provided type is not an interface")
+
+var defaultRegistry *Registry
+
+func init() {
+	defaultRegistry = buildDefaultRegistry()
+}
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+type RegistryBuilder struct {
+	typeEncoders      map[reflect.Type]ValueEncoder
+	interfaceEncoders []interfaceValueEncoder
+	kindEncoders      map[reflect.Kind]ValueEncoder
+
+	typeDecoders      map[reflect.Type]ValueDecoder
+	interfaceDecoders []interfaceValueDecoder
+	kindDecoders      map[reflect.Kind]ValueDecoder
+
+	typeMap map[bsontype.Type]reflect.Type
+}
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around and Encoders and Decoders are constructed from it.
+type Registry struct {
+	typeEncoders map[reflect.Type]ValueEncoder
+	typeDecoders map[reflect.Type]ValueDecoder
+
+	interfaceEncoders []interfaceValueEncoder
+	interfaceDecoders []interfaceValueDecoder
+
+	kindEncoders map[reflect.Kind]ValueEncoder
+	kindDecoders map[reflect.Kind]ValueDecoder
+
+	typeMap map[bsontype.Type]reflect.Type
+
+	mu sync.RWMutex
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+func NewRegistryBuilder() *RegistryBuilder {
+	return &RegistryBuilder{
+		typeEncoders: make(map[reflect.Type]ValueEncoder),
+		typeDecoders: make(map[reflect.Type]ValueDecoder),
+
+		interfaceEncoders: make([]interfaceValueEncoder, 0),
+		interfaceDecoders: make([]interfaceValueDecoder, 0),
+
+		kindEncoders: make(map[reflect.Kind]ValueEncoder),
+		kindDecoders: make(map[reflect.Kind]ValueDecoder),
+
+		typeMap: make(map[bsontype.Type]reflect.Type),
+	}
+}
+
+func buildDefaultRegistry() *Registry {
+	rb := NewRegistryBuilder()
+	defaultValueEncoders.RegisterDefaultEncoders(rb)
+	defaultValueDecoders.RegisterDefaultDecoders(rb)
+	return rb.Build()
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+	rb.RegisterTypeEncoder(t, codec)
+	rb.RegisterTypeDecoder(t, codec)
+	return rb
+}
+
+// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
+//
+// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It
+// will not be called when marshalling a non-interface type that implements the interface.
+func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.typeEncoders[t] = enc
+	return rb
+}
+
+// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
+// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t.Kind() != reflect.Interface {
+		panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", t, t.Kind())
+		panic(panicStr)
+	}
+
+	for idx, encoder := range rb.interfaceEncoders {
+		if encoder.i == t {
+			rb.interfaceEncoders[idx].ve = enc
+			return rb
+		}
+	}
+
+	rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
+	return rb
+}
+
+// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
+//
+// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface.
+// It will not be called when unmarshalling into a non-interface type that implements the interface.
+func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.typeDecoders[t] = dec
+	return rb
+}
+
+// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
+// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t.Kind() != reflect.Interface {
+		panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", t, t.Kind())
+		panic(panicStr)
+	}
+
+	for idx, decoder := range rb.interfaceDecoders {
+		if decoder.i == t {
+			rb.interfaceDecoders[idx].vd = dec
+			return rb
+		}
+	}
+
+	rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
+	return rb
+}
+
+// RegisterEncoder has been deprecated and will be removed in a future major version release. Use RegisterTypeEncoder
+// or RegisterHookEncoder instead.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t == tEmpty {
+		rb.typeEncoders[t] = enc
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		for idx, ir := range rb.interfaceEncoders {
+			if ir.i == t {
+				rb.interfaceEncoders[idx].ve = enc
+				return rb
+			}
+		}
+
+		rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
+	default:
+		rb.typeEncoders[t] = enc
+	}
+	return rb
+}
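+
+// Illustrative sketch (editor's example, not upstream driver code): wiring a
+// custom codec and a type map entry into a builder seeded with the defaults;
+// MyID, encodeMyID, and decodeMyID are hypothetical:
+//
+//	rb := NewRegistryBuilder()
+//	defaultValueEncoders.RegisterDefaultEncoders(rb)
+//	defaultValueDecoders.RegisterDefaultDecoders(rb)
+//	tMyID := reflect.TypeOf(MyID{})
+//	rb.RegisterTypeEncoder(tMyID, ValueEncoderFunc(encodeMyID)).
+//		RegisterTypeDecoder(tMyID, ValueDecoderFunc(decodeMyID)).
+//		RegisterTypeMapEntry(bsontype.Int64, reflect.TypeOf(int64(0)))
+//	reg := rb.Build()

+// RegisterDecoder has been deprecated and will be removed in a future major version release. Use RegisterTypeDecoder
+// or RegisterHookDecoder instead.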
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t == nil {
+		rb.typeDecoders[nil] = dec
+		return rb
+	}
+	if t == tEmpty {
+		rb.typeDecoders[t] = dec
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		for idx, ir := range rb.interfaceDecoders {
+			if ir.i == t {
+				rb.interfaceDecoders[idx].vd = dec
+				return rb
+			}
+		}
+
+		rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
+	default:
+		rb.typeDecoders[t] = dec
+	}
+	return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+	rb.kindEncoders[kind] = enc
+	return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+	rb.kindDecoders[kind] = dec
+	return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
+// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
+// to decode to bson.Raw, use the following code:
+//	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+	rb.typeMap[bt] = rt
+	return rb
+}
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+func (rb *RegistryBuilder) Build() *Registry {
+	registry := new(Registry)
+
+	registry.typeEncoders = make(map[reflect.Type]ValueEncoder)
+	for t, enc := range rb.typeEncoders {
+		registry.typeEncoders[t] = enc
+	}
+
+	registry.typeDecoders = make(map[reflect.Type]ValueDecoder)
+	for t, dec := range rb.typeDecoders {
+		registry.typeDecoders[t] = dec
+	}
+
+	registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders))
+	copy(registry.interfaceEncoders, rb.interfaceEncoders)
+
+	registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders))
+	copy(registry.interfaceDecoders, rb.interfaceDecoders)
+
+	registry.kindEncoders = make(map[reflect.Kind]ValueEncoder)
+	for kind, enc := range rb.kindEncoders {
+		registry.kindEncoders[kind] = enc
+	}
+
+	registry.kindDecoders = make(map[reflect.Kind]ValueDecoder)
+	for kind, dec := range rb.kindDecoders {
+		registry.kindDecoders[kind] = dec
+	}
+
+	registry.typeMap = make(map[bsontype.Type]reflect.Type)
+	for bt, rt := range rb.typeMap {
+		registry.typeMap[bt] = rt
+	}
+
+	return registry
+}
+
+// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows:
+//
+// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using
+// RegisterTypeEncoder for the interface will be selected.
+//
+// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the
+// type.
+//
+// 3. An encoder registered for the reflect.Kind of the value.
+//
+// If no encoder is found, an error of type ErrNoEncoder is returned.
+func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) {
+	encodererr := ErrNoEncoder{Type: t}
+	r.mu.RLock()
+	enc, found := r.lookupTypeEncoder(t)
+	r.mu.RUnlock()
+	if found {
+		if enc == nil {
+			return nil, ErrNoEncoder{Type: t}
+		}
+		return enc, nil
+	}
+
+	enc, found = r.lookupInterfaceEncoder(t, true)
+	if found {
+		r.mu.Lock()
+		r.typeEncoders[t] = enc
+		r.mu.Unlock()
+		return enc, nil
+	}
+
+	if t == nil {
+		r.mu.Lock()
+		r.typeEncoders[t] = nil
+		r.mu.Unlock()
+		return nil, encodererr
+	}
+
+	enc, found = r.kindEncoders[t.Kind()]
+	if !found {
+		r.mu.Lock()
+		r.typeEncoders[t] = nil
+		r.mu.Unlock()
+		return nil, encodererr
+	}
+
+	r.mu.Lock()
+	r.typeEncoders[t] = enc
+	r.mu.Unlock()
+	return enc, nil
+}
+
+func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) {
+	enc, found := r.typeEncoders[t]
+	return enc, found
+}
+
+func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) {
+	if t == nil {
+		return nil, false
+	}
+	for _, ienc := range r.interfaceEncoders {
+		if t.Implements(ienc.i) {
+			return ienc.ve, true
+		}
+		if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) {
+			// if *t implements an interface, this will catch if t implements an interface further ahead
+			// in interfaceEncoders
+			defaultEnc, found := r.lookupInterfaceEncoder(t, false)
+			if !found {
+				defaultEnc, _ = r.kindEncoders[t.Kind()]
+			}
+			return newCondAddrEncoder(ienc.ve, defaultEnc), true
+		}
+	}
+	return nil, false
+}
+
+// LookupDecoder inspects the registry for a decoder for the given type. The lookup precedence works as follows:
+//
+// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using
+// RegisterTypeDecoder for the interface will be selected.
+//
+// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the
+// type.
+//
+// 3. A decoder registered for the reflect.Kind of the value.
+//
+// If no decoder is found, an error of type ErrNoDecoder is returned.
+func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) {
+	if t == nil {
+		return nil, ErrNilType
+	}
+	decodererr := ErrNoDecoder{Type: t}
+	r.mu.RLock()
+	dec, found := r.lookupTypeDecoder(t)
+	r.mu.RUnlock()
+	if found {
+		if dec == nil {
+			return nil, ErrNoDecoder{Type: t}
+		}
+		return dec, nil
+	}
+
+	dec, found = r.lookupInterfaceDecoder(t, true)
+	if found {
+		r.mu.Lock()
+		r.typeDecoders[t] = dec
+		r.mu.Unlock()
+		return dec, nil
+	}
+
+	dec, found = r.kindDecoders[t.Kind()]
+	if !found {
+		r.mu.Lock()
+		r.typeDecoders[t] = nil
+		r.mu.Unlock()
+		return nil, decodererr
+	}
+
+	r.mu.Lock()
+	r.typeDecoders[t] = dec
+	r.mu.Unlock()
+	return dec, nil
+}
+
+func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) {
+	dec, found := r.typeDecoders[t]
+	return dec, found
+}
+
+func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) {
+	for _, idec := range r.interfaceDecoders {
+		if t.Implements(idec.i) {
+			return idec.vd, true
+		}
+		if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) {
+			// if *t implements an interface, this will catch if t implements an interface further ahead
+			// in interfaceDecoders
+			defaultDec, found := r.lookupInterfaceDecoder(t, false)
+			if !found {
+				defaultDec, _ = r.kindDecoders[t.Kind()]
+			}
+			return newCondAddrDecoder(idec.vd, defaultDec), true
+		}
+	}
+	return nil, false
+}
+
+// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
+// type. If no type is found, ErrNoTypeMapEntry is returned.
+func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
+	t, ok := r.typeMap[bt]
+	if !ok || t == nil {
+		return nil, ErrNoTypeMapEntry{Type: bt}
+	}
+	return t, nil
+}
+
+type interfaceValueEncoder struct {
+	i  reflect.Type
+	ve ValueEncoder
+}
+
+type interfaceValueDecoder struct {
+	i  reflect.Type
+	vd ValueDecoder
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
new file mode 100644
index 0000000..f0282eb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
@@ -0,0 +1,196 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+var defaultSliceCodec = NewSliceCodec()
+
+// SliceCodec is the Codec used for slice values.
+type SliceCodec struct {
+	EncodeNilAsEmpty bool
+}
+
+var _ ValueCodec = &SliceCodec{}
+
+// NewSliceCodec returns a SliceCodec with options opts.
+func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
+	sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
+
+	codec := SliceCodec{}
+	if sliceOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for slice types.
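A usage sketch for this codec, assuming the driver's public bson and bsonoptions packages; it swaps in a SliceCodec that writes nil slices as empty arrays:

    package main

    import (
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	// Nil slices encode as BSON null by default; this instance writes
    	// them as empty arrays instead.
    	codec := bsoncodec.NewSliceCodec(bsonoptions.SliceCodec().SetEncodeNilAsEmpty(true))
    	rb := bson.NewRegistryBuilder()
    	rb.RegisterDefaultEncoder(reflect.Slice, codec)
    	rb.RegisterDefaultDecoder(reflect.Slice, codec)
    	_ = rb.Build()
    }
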
+func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() && !sc.EncodeNilAsEmpty { + return vw.WriteNull() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. + if val.Type().Elem() == tByte { + var byteSlice []byte + for idx := 0; idx < val.Len(); idx++ { + byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) + } + return vw.WriteBinary(byteSlice) + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// DecodeValue is the ValueDecoder for slice types. 
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Slice {
+		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Array:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	case bsontype.Binary:
+		if val.Type().Elem() != tByte {
+			return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType)
+		}
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
+		}
+
+		if val.IsNil() {
+			val.Set(reflect.MakeSlice(val.Type(), 0, len(data)))
+		}
+
+		val.SetLen(0)
+		for _, elem := range data {
+			val.Set(reflect.Append(val, reflect.ValueOf(elem)))
+		}
+		return nil
+	case bsontype.String:
+		if val.Type().Elem() != tByte {
+			return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", vrType)
+		}
+		str, err := vr.ReadString()
+		if err != nil {
+			return err
+		}
+		byteStr := []byte(str)
+
+		if val.IsNil() {
+			val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr)))
+		}
+
+		val.SetLen(0)
+		for _, elem := range byteStr {
+			val.Set(reflect.Append(val, reflect.ValueOf(elem)))
+		}
+		return nil
+	default:
+		return fmt.Errorf("cannot decode %v into a slice", vrType)
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		dc.Ancestor = val.Type()
+		elemsFunc = defaultValueDecoders.decodeD
+	default:
+		elemsFunc = defaultValueDecoders.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+	}
+
+	val.SetLen(0)
+	val.Set(reflect.Append(val, elems...))
+
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
new file mode 100644
index 0000000..c672cf5
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
@@ -0,0 +1,94 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var defaultStringCodec = NewStringCodec()
+
+// StringCodec is the Codec used for string values.
+type StringCodec struct {
+	DecodeObjectIDAsHex bool
+}
+
+var _ ValueCodec = &StringCodec{}
+
+// NewStringCodec returns a StringCodec with options opts.
+func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
+	stringOpt := bsonoptions.MergeStringCodecOptions(opts...)
+	return &StringCodec{*stringOpt.DecodeObjectIDAsHex}
+}
+
+// EncodeValue is the ValueEncoder for string types.
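A usage sketch, assuming the same public packages as above: with DecodeObjectIDAsHex set to false, an ObjectID decodes into a string as its raw 12 bytes rather than the 24-character hex form.

    package main

    import (
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	codec := bsoncodec.NewStringCodec(bsonoptions.StringCodec().SetDecodeObjectIDAsHex(false))
    	rb := bson.NewRegistryBuilder()
    	rb.RegisterDefaultDecoder(reflect.String, codec)
    	_ = rb.Build()
    }
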
+func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.String {
+		return ValueEncoderError{
+			Name:     "StringEncodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: val,
+		}
+	}
+
+	return vw.WriteString(val.String())
+}
+
+// DecodeValue is the ValueDecoder for string types.
+func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.String {
+		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+	}
+	var str string
+	var err error
+	switch vr.Type() {
+	case bsontype.String:
+		str, err = vr.ReadString()
+		if err != nil {
+			return err
+		}
+	case bsontype.ObjectID:
+		oid, err := vr.ReadObjectID()
+		if err != nil {
+			return err
+		}
+		if sc.DecodeObjectIDAsHex {
+			str = oid.Hex()
+		} else {
+			byteArray := [12]byte(oid)
+			str = string(byteArray[:])
+		}
+	case bsontype.Symbol:
+		str, err = vr.ReadSymbol()
+		if err != nil {
+			return err
+		}
+	case bsontype.Binary:
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
+			return fmt.Errorf("StringDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
+		}
+		str = string(data)
+	case bsontype.Null:
+		if err = vr.ReadNull(); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a string type", vr.Type())
+	}
+
+	val.SetString(str)
+	return nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
new file mode 100644
index 0000000..777cdfb
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
@@ -0,0 +1,536 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var defaultStructCodec = &StructCodec{
+	cache:  make(map[reflect.Type]*structDescription),
+	parser: DefaultStructTagParser,
+}
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// StructCodec is the Codec used for struct values.
+type StructCodec struct {
+	cache                   map[reflect.Type]*structDescription
+	l                       sync.RWMutex
+	parser                  StructTagParser
+	DecodeZeroStruct        bool
+	DecodeDeepZeroInline    bool
+	EncodeOmitDefaultStruct bool
+	AllowUnexportedFields   bool
+}
+
+var _ ValueEncoder = &StructCodec{}
+var _ ValueDecoder = &StructCodec{}
+
+// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
+func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) {
+	if p == nil {
+		return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
+	}
+
+	structOpt := bsonoptions.MergeStructCodecOptions(opts...)
+ + codec := &StructCodec{ + cache: make(map[reflect.Type]*structDescription), + parser: p, + } + + if structOpt.DecodeZeroStruct != nil { + codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct + } + if structOpt.DecodeDeepZeroInline != nil { + codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline + } + if structOpt.EncodeOmitDefaultStruct != nil { + codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct + } + if structOpt.AllowUnexportedFields != nil { + codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields + } + + return codec, nil +} + +// EncodeValue handles encoding generic struct types. +func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Struct { + return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + sd, err := sc.describeStruct(r.Registry, val.Type()) + if err != nil { + return err + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + var rv reflect.Value + for _, desc := range sd.fl { + if desc.inline == nil { + rv = val.Field(desc.idx) + } else { + rv, err = fieldByIndexErr(val, desc.inline) + if err != nil { + continue + } + } + + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) + + if err != nil && err != errInvalidValue { + return err + } + + if err == errInvalidValue { + if desc.omitEmpty { + continue + } + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + err = vw2.WriteNull() + if err != nil { + return err + } + continue + } + + if desc.encoder == nil { + return ErrNoEncoder{Type: rv.Type()} + } + + encoder := desc.encoder + + var isZero bool + rvInterface := rv.Interface() + if cz, ok := encoder.(CodecZeroer); ok { + isZero = cz.IsTypeZero(rvInterface) + } else if rv.Kind() == reflect.Interface { + // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. + isZero = rv.IsNil() + } else { + isZero = sc.isZero(rvInterface) + } + if desc.omitEmpty && isZero { + continue + } + + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + + ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + err = encoder.EncodeValue(ectx, vw2, rv) + if err != nil { + return err + } + } + + if sd.inlineMap >= 0 { + rv := val.Field(sd.inlineMap) + collisionFn := func(key string) bool { + _, exists := sd.fm[key] + return exists + } + + return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) + } + + return dw.WriteDocumentEnd() +} + +// DecodeValue implements the Codec interface. +// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. +// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
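A construction sketch for this codec, assuming the public bson, bsoncodec, and bsonoptions packages; NewStructCodec pairs the default tag parser with any of the options merged above:

    package main

    import (
    	"log"
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	codec, err := bsoncodec.NewStructCodec(
    		bsoncodec.DefaultStructTagParser,
    		bsonoptions.StructCodec().SetDecodeZeroStruct(true),
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    	rb := bson.NewRegistryBuilder()
    	rb.RegisterDefaultEncoder(reflect.Struct, codec)
    	rb.RegisterDefaultDecoder(reflect.Struct, codec)
    	_ = rb.Build()
    }
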
+func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Struct { + return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + sd, err := sc.describeStruct(r.Registry, val.Type()) + if err != nil { + return err + } + + if sc.DecodeZeroStruct { + val.Set(reflect.Zero(val.Type())) + } + if sc.DecodeDeepZeroInline && sd.inline { + val.Set(deepZero(val.Type())) + } + + var decoder ValueDecoder + var inlineMap reflect.Value + if sd.inlineMap >= 0 { + inlineMap = val.Field(sd.inlineMap) + decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + if err != nil { + return err + } + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + for { + name, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + fd, exists := sd.fm[name] + if !exists { + // if the original name isn't found in the struct description, try again with the name in lowercase + // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field + // names + fd, exists = sd.fm[strings.ToLower(name)] + } + + if !exists { + if sd.inlineMap < 0 { + // The encoding/json package requires a flag to return on error for non-existent fields. + // This functionality seems appropriate for the struct codec. + err = vr.Skip() + if err != nil { + return err + } + continue + } + + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + + elem := reflect.New(inlineMap.Type().Elem()).Elem() + r.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(r, vr, elem) + if err != nil { + return err + } + inlineMap.SetMapIndex(reflect.ValueOf(name), elem) + continue + } + + var field reflect.Value + if fd.inline == nil { + field = val.Field(fd.idx) + } else { + field, err = getInlineField(val, fd.inline) + if err != nil { + return err + } + } + + if !field.CanSet() { // Being settable is a super set of being addressable. 
+ return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field) + } + if field.Kind() == reflect.Ptr && field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + field = field.Addr() + + dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} + if fd.decoder == nil { + return ErrNoDecoder{Type: field.Elem().Type()} + } + + if decoder, ok := fd.decoder.(ValueDecoder); ok { + err = decoder.DecodeValue(dctx, vr, field.Elem()) + if err != nil { + return err + } + continue + } + err = fd.decoder.DecodeValue(dctx, vr, field) + if err != nil { + return err + } + } + + return nil +} + +func (sc *StructCodec) isZero(i interface{}) bool { + v := reflect.ValueOf(i) + + // check the value validity + if !v.IsValid() { + return true + } + + if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { + return z.IsZero() + } + + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Struct: + if sc.EncodeOmitDefaultStruct { + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + for i := 0; i < v.NumField(); i++ { + if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { + continue // Private field + } + fld := v.Field(i) + if !sc.isZero(fld.Interface()) { + return false + } + } + return true + } + } + + return false +} + +type structDescription struct { + fm map[string]fieldDescription + fl []fieldDescription + inlineMap int + inline bool +} + +type fieldDescription struct { + name string + idx int + omitEmpty bool + minSize bool + truncate bool + inline []int + encoder ValueEncoder + decoder ValueDecoder +} + +func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { + // We need to analyze the struct, including getting the tags, collecting + // information about inlining, and create a map of the field name to the field. 
+	sc.l.RLock()
+	ds, exists := sc.cache[t]
+	sc.l.RUnlock()
+	if exists {
+		return ds, nil
+	}
+
+	numFields := t.NumField()
+	sd := &structDescription{
+		fm:        make(map[string]fieldDescription, numFields),
+		fl:        make([]fieldDescription, 0, numFields),
+		inlineMap: -1,
+	}
+
+	for i := 0; i < numFields; i++ {
+		sf := t.Field(i)
+		if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
+			// field is private or unexported fields aren't allowed, ignore
+			continue
+		}
+
+		sfType := sf.Type
+		encoder, err := r.LookupEncoder(sfType)
+		if err != nil {
+			encoder = nil
+		}
+		decoder, err := r.LookupDecoder(sfType)
+		if err != nil {
+			decoder = nil
+		}
+
+		description := fieldDescription{idx: i, encoder: encoder, decoder: decoder}
+
+		stags, err := sc.parser.ParseStructTags(sf)
+		if err != nil {
+			return nil, err
+		}
+		if stags.Skip {
+			continue
+		}
+		description.name = stags.Name
+		description.omitEmpty = stags.OmitEmpty
+		description.minSize = stags.MinSize
+		description.truncate = stags.Truncate
+
+		if stags.Inline {
+			sd.inline = true
+			switch sfType.Kind() {
+			case reflect.Map:
+				if sd.inlineMap >= 0 {
+					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+				}
+				if sfType.Key() != tString {
+					return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
+				}
+				sd.inlineMap = description.idx
+			case reflect.Ptr:
+				sfType = sfType.Elem()
+				if sfType.Kind() != reflect.Struct {
+					return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+				}
+				fallthrough
+			case reflect.Struct:
+				inlinesf, err := sc.describeStruct(r, sfType)
+				if err != nil {
+					return nil, err
+				}
+				for _, fd := range inlinesf.fl {
+					if _, exists := sd.fm[fd.name]; exists {
+						return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name)
+					}
+					if fd.inline == nil {
+						fd.inline = []int{i, fd.idx}
+					} else {
+						fd.inline = append([]int{i}, fd.inline...)
+					}
+					sd.fm[fd.name] = fd
+					sd.fl = append(sd.fl, fd)
+				}
+			default:
+				return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+			}
+			continue
+		}
+
+		if _, exists := sd.fm[description.name]; exists {
+			return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), description.name)
+		}
+
+		sd.fm[description.name] = description
+		sd.fl = append(sd.fl, description)
+	}
+
+	sc.l.Lock()
+	sc.cache[t] = sd
+	sc.l.Unlock()
+
+	return sd, nil
+}
+
+func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			switch r := recovered.(type) {
+			case string:
+				err = fmt.Errorf("%s", r)
+			case error:
+				err = r
+			}
+		}
+	}()
+
+	result = v.FieldByIndex(index)
+	return
+}
+
+func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {
+	field, err := fieldByIndexErr(val, index)
+	if err == nil {
+		return field, nil
+	}
+
+	// if parent of this element doesn't exist, fix its parent
+	inlineParent := index[:len(index)-1]
+	var fParent reflect.Value
+	if fParent, err = fieldByIndexErr(val, inlineParent); err != nil {
+		fParent, err = getInlineField(val, inlineParent)
+		if err != nil {
+			return fParent, err
+		}
+	}
+	fParent.Set(reflect.New(fParent.Type().Elem()))
+
+	return fieldByIndexErr(val, index)
+}
+
+// deepZero returns a recursively constructed zero value for the given struct type
+func deepZero(st reflect.Type) (result reflect.Value) {
+	result = reflect.Indirect(reflect.New(st))
+
+	if result.Kind() == reflect.Struct {
+		for i := 0; i < result.NumField(); i++ {
+			if f := result.Field(i); f.Kind() == reflect.Ptr {
+				if f.CanInterface() {
+					if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct {
+						result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem())))
+					}
+				}
+			}
+		}
+	}
+
+	return
+}
+
+// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside
+func recursivePointerTo(v reflect.Value) reflect.Value {
+	v = reflect.Indirect(v)
+	result := reflect.New(v.Type())
+	if v.Kind() == reflect.Struct {
+		for i := 0; i < v.NumField(); i++ {
+			if f := v.Field(i); f.Kind() == reflect.Ptr {
+				if f.Elem().Kind() == reflect.Struct {
+					result.Elem().Field(i).Set(recursivePointerTo(f))
+				}
+			}
+		}
+	}
+
+	return result
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 0000000..69d0ae4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"strings"
+)
+
+// StructTagParser returns the struct tags for a given struct field.
+type StructTagParser interface {
+	ParseStructTags(reflect.StructField) (StructTags, error)
+}
+
+// StructTagParserFunc is an adapter that allows a generic function to be used
+// as a StructTagParser.
+type StructTagParserFunc func(reflect.StructField) (StructTags, error)
+
+// ParseStructTags implements the StructTagParser interface.
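Because StructTagParserFunc is a plain function adapter, a custom parser can wrap the default one; the sketch below clears the Inline flag, a made-up policy chosen only to illustrate the shape:

    package main

    import (
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    )

    // noInlineParser delegates to the default parser but never inlines.
    var noInlineParser bsoncodec.StructTagParserFunc = func(sf reflect.StructField) (bsoncodec.StructTags, error) {
    	st, err := bsoncodec.DefaultStructTagParser(sf)
    	if err != nil {
    		return st, err
    	}
    	st.Inline = false
    	return st, nil
    }

    func main() {
    	_, _ = bsoncodec.NewStructCodec(noInlineParser)
    }
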
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
+	return stpf(sf)
+}
+
+// StructTags represents the struct tag fields that the StructCodec uses during
+// the encoding and decoding process.
+//
+// In the case of a struct, the lowercased field name is used as the key for each exported
+// field but this behavior may be changed using a struct tag. The tag may also contain flags to
+// adjust the marshalling behavior for the field.
+//
+// The properties are defined below:
+//
+//	OmitEmpty	Only include the field if it's not set to the zero value for the type or to
+//			empty slices or maps.
+//
+//	MinSize		Marshal an integer of a type larger than 32 bits value as an int32, if that's
+//			feasible while preserving the numeric value.
+//
+//	Truncate	When unmarshaling a BSON double, it is permitted to lose precision to fit within
+//			a float32.
+//
+//	Inline		Inline the field, which must be a struct or a map, causing all of its fields
+//			or keys to be processed as if they were part of the outer struct. For maps,
+//			keys must not conflict with the bson keys of other struct fields.
+//
+//	Skip		This struct field should be skipped. This is usually denoted by parsing a "-"
+//			for the name.
+//
+// TODO(skriptble): Add tags for undefined as nil and for null as nil.
+type StructTags struct {
+	Name      string
+	OmitEmpty bool
+	MinSize   bool
+	Truncate  bool
+	Inline    bool
+	Skip      bool
+}
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+//	"[<key>][,<flag1>[,<flag2>]]"
+//
+//	`(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// An example:
+//
+//	type T struct {
+//		A bool
+//		B int    "myb"
+//		C string "myc,omitempty"
+//		D string `bson:",omitempty" json:"jsonkey"`
+//		E int64  ",minsize"
+//		F int64  "myf,omitempty,minsize"
+//	}
+//
+// A struct tag either consisting entirely of '-' or with a bson key with a
+// value consisting entirely of '-' will return a StructTags with Skip true and
+// the remaining fields will be their default values.
var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+	key := strings.ToLower(sf.Name)
+	tag, ok := sf.Tag.Lookup("bson")
+	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+		tag = string(sf.Tag)
+	}
+	var st StructTags
+	if tag == "-" {
+		st.Skip = true
+		return st, nil
+	}
+
+	for idx, str := range strings.Split(tag, ",") {
+		if idx == 0 && str != "" {
+			key = str
+		}
+		switch str {
+		case "omitempty":
+			st.OmitEmpty = true
+		case "minsize":
+			st.MinSize = true
+		case "truncate":
+			st.Truncate = true
+		case "inline":
+			st.Inline = true
+		}
+	}
+
+	st.Name = key
+
+	return st, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
new file mode 100644
index 0000000..6f1b724
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
@@ -0,0 +1,101 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const (
+	timeFormatString = "2006-01-02T15:04:05.999Z07:00"
+)
+
+var defaultTimeCodec = NewTimeCodec()
+
+// TimeCodec is the Codec used for time.Time values.
+type TimeCodec struct {
+	UseLocalTimeZone bool
+}
+
+var _ ValueCodec = &TimeCodec{}
+
+// NewTimeCodec returns a TimeCodec with options opts.
+func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
+	timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
+
+	codec := TimeCodec{}
+	if timeOpt.UseLocalTimeZone != nil {
+		codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone
+	}
+	return &codec
+}
+
+// DecodeValue is the ValueDecoderFunc for time.Time.
+func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
+	var timeVal time.Time
+	switch vrType := vr.Type(); vrType {
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return err
+		}
+		timeVal = time.Unix(dt/1000, dt%1000*1000000)
+	case bsontype.String:
+		// assume strings are in the timeFormatString layout
+		timeStr, err := vr.ReadString()
+		if err != nil {
+			return err
+		}
+		timeVal, err = time.Parse(timeFormatString, timeStr)
+		if err != nil {
+			return err
+		}
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		timeVal = time.Unix(i64/1000, i64%1000*1000000)
+	case bsontype.Timestamp:
+		t, _, err := vr.ReadTimestamp()
+		if err != nil {
+			return err
+		}
+		timeVal = time.Unix(int64(t), 0)
+	case bsontype.Null:
+		if err := vr.ReadNull(); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a time.Time", vrType)
+	}
+
+	if !tc.UseLocalTimeZone {
+		timeVal = timeVal.UTC()
+	}
+	val.Set(reflect.ValueOf(timeVal))
+	return nil
+}
+
+// EncodeValue is the ValueEncoderFunc for time.Time.
+func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6))
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
new file mode 100644
index 0000000..bbb6bb9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
@@ -0,0 +1,81 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "net/url" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var ptBool = reflect.TypeOf((*bool)(nil)) +var ptInt8 = reflect.TypeOf((*int8)(nil)) +var ptInt16 = reflect.TypeOf((*int16)(nil)) +var ptInt32 = reflect.TypeOf((*int32)(nil)) +var ptInt64 = reflect.TypeOf((*int64)(nil)) +var ptInt = reflect.TypeOf((*int)(nil)) +var ptUint8 = reflect.TypeOf((*uint8)(nil)) +var ptUint16 = reflect.TypeOf((*uint16)(nil)) +var ptUint32 = reflect.TypeOf((*uint32)(nil)) +var ptUint64 = reflect.TypeOf((*uint64)(nil)) +var ptUint = reflect.TypeOf((*uint)(nil)) +var ptFloat32 = reflect.TypeOf((*float32)(nil)) +var ptFloat64 = reflect.TypeOf((*float64)(nil)) +var ptString = reflect.TypeOf((*string)(nil)) + +var tBool = reflect.TypeOf(false) +var tFloat32 = reflect.TypeOf(float32(0)) +var tFloat64 = reflect.TypeOf(float64(0)) +var tInt = reflect.TypeOf(int(0)) +var tInt8 = reflect.TypeOf(int8(0)) +var tInt16 = reflect.TypeOf(int16(0)) +var tInt32 = reflect.TypeOf(int32(0)) +var tInt64 = reflect.TypeOf(int64(0)) +var tString = reflect.TypeOf("") +var tTime = reflect.TypeOf(time.Time{}) +var tUint = reflect.TypeOf(uint(0)) +var tUint8 = reflect.TypeOf(uint8(0)) +var tUint16 = reflect.TypeOf(uint16(0)) +var tUint32 = reflect.TypeOf(uint32(0)) +var tUint64 = reflect.TypeOf(uint64(0)) + +var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() +var tByteSlice = reflect.TypeOf([]byte(nil)) +var tByte = reflect.TypeOf(byte(0x00)) +var tURL = reflect.TypeOf(url.URL{}) +var tJSONNumber = reflect.TypeOf(json.Number("")) + +var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() +var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() +var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() +var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() + +var tBinary = reflect.TypeOf(primitive.Binary{}) +var tUndefined = reflect.TypeOf(primitive.Undefined{}) +var tOID = reflect.TypeOf(primitive.ObjectID{}) +var tDateTime = reflect.TypeOf(primitive.DateTime(0)) +var tNull = reflect.TypeOf(primitive.Null{}) +var tRegex = reflect.TypeOf(primitive.Regex{}) +var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) +var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) +var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) +var tSymbol = reflect.TypeOf(primitive.Symbol("")) +var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) +var tDecimal = reflect.TypeOf(primitive.Decimal128{}) +var tMinKey = reflect.TypeOf(primitive.MinKey{}) +var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) +var tD = reflect.TypeOf(primitive.D{}) +var tM = reflect.TypeOf(primitive.M{}) +var tA = reflect.TypeOf(primitive.A{}) +var tE = reflect.TypeOf(primitive.E{}) + +var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go new file mode 100644 index 0000000..e0df058 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -0,0 +1,150 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "errors" + "fmt" + "math" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +var defaultUIntCodec = NewUIntCodec() + +// UIntCodec is the Codec used for uint values. +type UIntCodec struct { + EncodeToMinSize bool +} + +var _ ValueCodec = &UIntCodec{} + +// NewUIntCodec returns a UIntCodec with options opts. +func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { + uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) + + codec := UIntCodec{} + if uintOpt.EncodeToMinSize != nil { + codec.EncodeToMinSize = *uintOpt.EncodeToMinSize + } + return &codec +} + +// EncodeValue is the ValueEncoder for uint types. +func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + + // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 + useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) + + if u64 <= math.MaxInt32 && useMinSize { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +// DecodeValue is the ValueDecoder for uint types. +func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows 
uint", i64) + } + default: + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go new file mode 100644 index 0000000..b1256a4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +type ByteSliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +} + +// ByteSliceCodec creates a new *ByteSliceCodecOptions +func ByteSliceCodec() *ByteSliceCodecOptions { + return &ByteSliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { + bs.EncodeNilAsEmpty = &b + return bs +} + +// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { + bs := ByteSliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return bs +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go new file mode 100644 index 0000000..6caaa00 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +type EmptyInterfaceCodecOptions struct { + DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +} + +// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { + return &EmptyInterfaceCodecOptions{} +} + +// SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { + e.DecodeBinaryAsSlice = &b + return e +} + +// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. 
+func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { + e := EmptyInterfaceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeBinaryAsSlice != nil { + e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice + } + } + + return e +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go new file mode 100644 index 0000000..1ac3e20 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -0,0 +1,48 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// MapCodecOptions represents all possible options for map encoding and decoding. +type MapCodecOptions struct { + DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. + EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. +} + +// MapCodec creates a new *MapCodecOptions +func MapCodec() *MapCodecOptions { + return &MapCodecOptions{} +} + +// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { + t.DecodeZerosMap = &b + return t +} + +// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { + t.EncodeNilAsEmpty = &b + return t +} + +// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { + s := MapCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeZerosMap != nil { + s.DecodeZerosMap = opt.DecodeZerosMap + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return s +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go new file mode 100644 index 0000000..ef965e4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// SliceCodecOptions represents all possible options for slice encoding and decoding. +type SliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +} + +// SliceCodec creates a new *SliceCodecOptions +func SliceCodec() *SliceCodecOptions { + return &SliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. 
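The Merge* helpers in these options files all share the same last-one-wins rule; a short sketch using the slice options defined just above:

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	a := bsonoptions.SliceCodec().SetEncodeNilAsEmpty(false)
    	b := bsonoptions.SliceCodec().SetEncodeNilAsEmpty(true)
    	merged := bsonoptions.MergeSliceCodecOptions(a, b)
    	fmt.Println(*merged.EncodeNilAsEmpty) // true: the last non-nil value wins
    }
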
+func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions {
+	s.EncodeNilAsEmpty = &b
+	return s
+}
+
+// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion.
+func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions {
+	s := SliceCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeNilAsEmpty != nil {
+			s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
new file mode 100644
index 0000000..65964f4
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
@@ -0,0 +1,41 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultDecodeOIDAsHex = true
+
+// StringCodecOptions represents all possible options for string encoding and decoding.
+type StringCodecOptions struct {
+	DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
+}
+
+// StringCodec creates a new *StringCodecOptions
+func StringCodec() *StringCodecOptions {
+	return &StringCodecOptions{}
+}
+
+// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
+// from the raw object ID bytes will be used. Defaults to true.
+func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
+	t.DecodeObjectIDAsHex = &b
+	return t
+}
+
+// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
+func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
+	s := &StringCodecOptions{&defaultDecodeOIDAsHex}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeObjectIDAsHex != nil {
+			s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
new file mode 100644
index 0000000..ad32c7c
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
@@ -0,0 +1,70 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// StructCodecOptions represents all possible options for struct encoding and decoding.
+type StructCodecOptions struct {
+	DecodeZeroStruct        *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
+	DecodeDeepZeroInline    *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+	EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false.
+	AllowUnexportedFields   *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+}
+
+// StructCodec creates a new *StructCodecOptions
+func StructCodec() *StructCodecOptions {
+	return &StructCodecOptions{}
+}
+
+// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
+func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
+	t.DecodeZeroStruct = &b
+	return t
+}
+
+// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
+	t.DecodeDeepZeroInline = &b
+	return t
+}
+
+// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
+// its values set to their default value. Defaults to false.
+func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
+	t.EncodeOmitDefaultStruct = &b
+	return t
+}
+
+// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
+	t.AllowUnexportedFields = &b
+	return t
+}
+
+// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
+func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
+	s := StructCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.DecodeZeroStruct != nil {
+			s.DecodeZeroStruct = opt.DecodeZeroStruct
+		}
+		if opt.DecodeDeepZeroInline != nil {
+			s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline
+		}
+		if opt.EncodeOmitDefaultStruct != nil {
+			s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct
+		}
+		if opt.AllowUnexportedFields != nil {
+			s.AllowUnexportedFields = opt.AllowUnexportedFields
+		}
+	}
+
+	return s
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
new file mode 100644
index 0000000..13496d1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
@@ -0,0 +1,38 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// TimeCodecOptions represents all possible options for time.Time encoding and decoding.
+type TimeCodecOptions struct {
+	UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false.
+}
+
+// TimeCodec creates a new *TimeCodecOptions
+func TimeCodec() *TimeCodecOptions {
+	return &TimeCodecOptions{}
+}
+
+// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
+func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions {
+	t.UseLocalTimeZone = &b
+	return t
+}
+
+// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion.
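A usage sketch tying these options to the TimeCodec defined earlier in this diff; decoded time.Time values then keep the local zone instead of being converted to UTC:

    package main

    import (
    	"reflect"
    	"time"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	tc := bsoncodec.NewTimeCodec(bsonoptions.TimeCodec().SetUseLocalTimeZone(true))
    	rb := bson.NewRegistryBuilder()
    	tTime := reflect.TypeOf(time.Time{})
    	rb.RegisterTypeEncoder(tTime, tc)
    	rb.RegisterTypeDecoder(tTime, tc)
    	_ = rb.Build()
    }
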
+func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions {
+	t := TimeCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.UseLocalTimeZone != nil {
+			t.UseLocalTimeZone = opt.UseLocalTimeZone
+		}
+	}
+
+	return t
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
new file mode 100644
index 0000000..e08b7f1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
@@ -0,0 +1,38 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// UIntCodecOptions represents all possible options for uint encoding and decoding.
+type UIntCodecOptions struct {
+	EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded as the minimum size bsontype. Defaults to false.
+}
+
+// UIntCodec creates a new *UIntCodecOptions
+func UIntCodec() *UIntCodecOptions {
+	return &UIntCodecOptions{}
+}
+
+// SetEncodeToMinSize specifies if all uints except uint64 should be encoded as the minimum size bsontype. Defaults to false.
+func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
+	u.EncodeToMinSize = &b
+	return u
+}
+
+// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
+func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
+	u := UIntCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeToMinSize != nil {
+			u.EncodeToMinSize = opt.EncodeToMinSize
+		}
+	}
+
+	return u
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
new file mode 100644
index 0000000..02e3a7e
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
@@ -0,0 +1,389 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Copier is a type that allows copying between ValueReaders, ValueWriters, and
+// []byte values.
+type Copier struct{}
+
+// NewCopier creates a new Copier.
+func NewCopier() Copier {
+	return Copier{}
+}
+
+// CopyDocument handles copying a document from src to dst.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+	return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
+	dr, err := src.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return c.copyDocumentCore(dw, dr)
+}
+
+// CopyDocumentFromBytes copies the values from a BSON document represented as a
+// []byte to a ValueWriter.
+func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToDocumentWriter(dw, src)
+	if err != nil {
+		return err
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
+// DocumentWriter.
+func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
+	// TODO(skriptble): Create error types here. Anything that's a tag should be a property.
+	length, rem, ok := bsoncore.ReadLength(src)
+	if !ok {
+		return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
+	}
+	if len(src) < int(length) {
+		return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", length, len(src))
+	}
+	rem = rem[:length-4]
+
+	var t bsontype.Type
+	var key string
+	var val bsoncore.Value
+	for {
+		t, rem, ok = bsoncore.ReadType(rem)
+		if !ok {
+			return io.EOF
+		}
+		if t == bsontype.Type(0) {
+			if len(rem) != 0 {
+				return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
+			}
+			break
+		}
+
+		key, rem, ok = bsoncore.ReadKey(rem)
+		if !ok {
+			return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
+		}
+		dvw, err := dst.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+		val, rem, ok = bsoncore.ReadValue(rem, t)
+		if !ok {
+			return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
+		}
+		err = c.CopyValueFromBytes(dvw, t, val.Data)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CopyDocumentToBytes copies an entire document from the ValueReader and
+// returns it as bytes.
+func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
+	return c.AppendDocumentBytes(nil, src)
+}
+
+// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
+// append the result to dst.
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer vwPool.Put(vw)
+
+	vw.reset(dst)
+
+	err := c.CopyDocument(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// CopyValueFromBytes will write the value represented by t and src to dst.
+func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
+	if wvb, ok := dst.(BytesWriter); ok {
+		return wvb.WriteValueBytes(t, src)
+	}
+
+	vr := vrPool.Get().(*valueReader)
+	defer vrPool.Put(vr)
+
+	vr.reset(src)
+	vr.pushElement(t)
+
+	return c.CopyValue(dst, vr)
+}
+
+// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
+// []byte.
+func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
+	return c.AppendValueBytes(nil, src)
+}
+
+// AppendValueBytes functions the same as CopyValueToBytes, but will append the
+// result to dst.
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		return br.ReadValueBytes(dst)
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer vwPool.Put(vw)
+
+	start := len(dst)
+
+	vw.reset(dst)
+	vw.push(mElement)
+
+	err := c.CopyValue(vw, src)
+	if err != nil {
+		return 0, dst, err
+	}
+
+	return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
+}
+
+// CopyValue will copy a single value from src to dst.
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
+	var err error
+	switch src.Type() {
+	case bsontype.Double:
+		var f64 float64
+		f64, err = src.ReadDouble()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDouble(f64)
+	case bsontype.String:
+		var str string
+		str, err = src.ReadString()
+		if err != nil {
+			return err
+		}
+		err = dst.WriteString(str)
+	case bsontype.EmbeddedDocument:
+		err = c.CopyDocument(dst, src)
+	case bsontype.Array:
+		err = c.copyArray(dst, src)
+	case bsontype.Binary:
+		var data []byte
+		var subtype byte
+		data, subtype, err = src.ReadBinary()
+		if err != nil {
+			break
+		}
+		err = dst.WriteBinaryWithSubtype(data, subtype)
+	case bsontype.Undefined:
+		err = src.ReadUndefined()
+		if err != nil {
+			break
+		}
+		err = dst.WriteUndefined()
+	case bsontype.ObjectID:
+		var oid primitive.ObjectID
+		oid, err = src.ReadObjectID()
+		if err != nil {
+			break
+		}
+		err = dst.WriteObjectID(oid)
+	case bsontype.Boolean:
+		var b bool
+		b, err = src.ReadBoolean()
+		if err != nil {
+			break
+		}
+		err = dst.WriteBoolean(b)
+	case bsontype.DateTime:
+		var dt int64
+		dt, err = src.ReadDateTime()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDateTime(dt)
+	case bsontype.Null:
+		err = src.ReadNull()
+		if err != nil {
+			break
+		}
+		err = dst.WriteNull()
+	case bsontype.Regex:
+		var pattern, options string
+		pattern, options, err = src.ReadRegex()
+		if err != nil {
+			break
+		}
+		err = dst.WriteRegex(pattern, options)
+	case bsontype.DBPointer:
+		var ns string
+		var pointer primitive.ObjectID
+		ns, pointer, err = src.ReadDBPointer()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDBPointer(ns, pointer)
+	case bsontype.JavaScript:
+		var js string
+		js, err = src.ReadJavascript()
+		if err != nil {
+			break
+		}
+		err = dst.WriteJavascript(js)
+	case bsontype.Symbol:
+		var symbol string
+		symbol, err = src.ReadSymbol()
+		if err != nil {
+			break
+		}
+		err = dst.WriteSymbol(symbol)
+	case bsontype.CodeWithScope:
+		var code string
+		var srcScope DocumentReader
+		code, srcScope, err = src.ReadCodeWithScope()
+		if err != nil {
+			break
+		}
+
+		var dstScope DocumentWriter
+		dstScope, err = dst.WriteCodeWithScope(code)
+		if err != nil {
+			break
+		}
+		err = c.copyDocumentCore(dstScope, srcScope)
+	case bsontype.Int32:
+		var i32 int32
+		i32, err = src.ReadInt32()
+		if err != nil {
+			break
+		}
+		err = dst.WriteInt32(i32)
+	case bsontype.Timestamp:
+		var t, i uint32
+		t, i, err = src.ReadTimestamp()
+		if err != nil {
+			break
+		}
+		err = dst.WriteTimestamp(t, i)
+	case bsontype.Int64:
+		var i64 int64
+		i64, err = src.ReadInt64()
+		if err != nil {
+			break
+		}
+		err = dst.WriteInt64(i64)
+	case bsontype.Decimal128:
+		var d128 primitive.Decimal128
+		d128, err = src.ReadDecimal128()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDecimal128(d128)
+	case bsontype.MinKey:
+		err = src.ReadMinKey()
+		if err != nil {
+			break
+		}
+		err = dst.WriteMinKey()
+	case bsontype.MaxKey:
+		err = src.ReadMaxKey()
+		if err != nil {
+			break
+		}
+		err = dst.WriteMaxKey()
+	default:
+		err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type())
+	}
+
+	return err
+}
+
+func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
+	ar, err := src.ReadArray()
+	if err != nil {
+		return err
+	}
+
+	aw, err := dst.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	for {
+		vr, err := ar.ReadValue()
+		if err == ErrEOA {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := dw.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
new file mode 100644
index 0000000..750b0d2
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON-like types from sources.
+package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw"
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 0000000..3ff17c1
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,750 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const maxNestingDepth = 200
+
+// ErrInvalidJSON indicates the JSON input is invalid
+var ErrInvalidJSON = errors.New("invalid JSON input")
+
+type jsonParseState byte
+
+const (
+	jpsStartState jsonParseState = iota
+	jpsSawBeginObject
+	jpsSawEndObject
+	jpsSawBeginArray
+	jpsSawEndArray
+	jpsSawColon
+	jpsSawComma
+	jpsSawKey
+	jpsSawValue
+	jpsDoneState
+	jpsInvalidState
+)
+
+type jsonParseMode byte
+
+const (
+	jpmInvalidMode jsonParseMode = iota
+	jpmObjectMode
+	jpmArrayMode
+)
+
+type extJSONValue struct {
+	t bsontype.Type
+	v interface{}
+}
+
+type extJSONObject struct {
+	keys   []string
+	values []*extJSONValue
+}
+
+type extJSONParser struct {
+	js *jsonScanner
+	s  jsonParseState
+	m  []jsonParseMode
+	k  string
+	v  *extJSONValue
+
+	err       error
+	canonical bool
+	depth     int
+	maxDepth  int
+
+	emptyObject bool
+}
+
+// newExtJSONParser returns a new extended JSON parser, ready to begin
+// parsing from the first character of the given JSON input. It will not
+// perform any read-ahead and will therefore not report any errors about
+// malformed JSON at this point.
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
+	return &extJSONParser{
+		js:        &jsonScanner{r: r},
+		s:         jpsStartState,
+		m:         []jsonParseMode{},
+		canonical: canonical,
+		maxDepth:  maxNestingDepth,
+	}
+}
+
+// peekType examines the next value and returns its BSON Type
+func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
+	var t bsontype.Type
+	var err error
+	initialState := ejp.s
+
+	ejp.advanceState()
+	switch ejp.s {
+	case jpsSawValue:
+		t = ejp.v.t
+	case jpsSawBeginArray:
+		t = bsontype.Array
+	case jpsInvalidState:
+		err = ejp.err
+	case jpsSawComma:
+		// in array mode, seeing a comma means we need to progress again to actually observe a type
+		if ejp.peekMode() == jpmArrayMode {
+			return ejp.peekType()
+		}
+	case jpsSawEndArray:
+		// this would only be a valid state if we were in array mode, so return end-of-array error
+		err = ErrEOA
+	case jpsSawBeginObject:
+		// peek key to determine type
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawEndObject: // empty embedded document
+			t = bsontype.EmbeddedDocument
+			ejp.emptyObject = true
+		case jpsInvalidState:
+			err = ejp.err
+		case jpsSawKey:
+			if initialState == jpsStartState {
+				return bsontype.EmbeddedDocument, nil
+			}
+			t = wrapperKeyBSONType(ejp.k)
+
+			switch t {
+			case bsontype.JavaScript:
+				// just saw $code, need to check for $scope at same level
+				_, err = ejp.readValue(bsontype.JavaScript)
+				if err != nil {
+					break
+				}
+
+				switch ejp.s {
+				case jpsSawEndObject: // type is TypeJavaScript
+				case jpsSawComma:
+					ejp.advanceState()
+
+					if ejp.s == jpsSawKey && ejp.k == "$scope" {
+						t = bsontype.CodeWithScope
+					} else {
+						err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
+					}
+				case jpsInvalidState:
+					err = ejp.err
+				default:
+					err = ErrInvalidJSON
+				}
+			case bsontype.CodeWithScope:
+				err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope")
+			}
+		}
+	}
+
+	return t, err
+}
+
+// readKey parses the next key and its type and returns them
+func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
+	if ejp.emptyObject {
+		ejp.emptyObject = false
+		return "", 0, ErrEOD
+	}
+
+	// advance to key (or return with error)
+	switch ejp.s {
+	case jpsStartState:
+		ejp.advanceState()
+		if ejp.s == jpsSawBeginObject {
+			ejp.advanceState()
+		}
+	case jpsSawBeginObject:
+		ejp.advanceState()
+	case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawBeginObject, jpsSawComma:
+			ejp.advanceState()
+		case jpsSawEndObject:
+			return "", 0, ErrEOD
+		case jpsDoneState:
+			return "", 0, io.EOF
+		case jpsInvalidState:
+			return "", 0, ejp.err
+		default:
+			return "", 0, ErrInvalidJSON
+		}
+	case jpsSawKey: // do nothing (key was peeked before)
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// read key
+	var key string
+
+	switch ejp.s {
+	case jpsSawKey:
+		key = ejp.k
+	case jpsSawEndObject:
+		return "", 0, ErrEOD
+	case jpsInvalidState:
+		return "", 0, ejp.err
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// check for colon
+	ejp.advanceState()
+	if err := ensureColon(ejp.s, key); err != nil {
+		return "", 0, err
+	}
+
+	// peek at the value to determine type
+	t, err := ejp.peekType()
+	if err != nil {
+		return "", 0, err
+	}
+
+	return key, t, nil
+}
+
+// readValue returns the value corresponding to the Type returned by peekType
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
+	if ejp.s == jpsInvalidState {
+		return nil, ejp.err
+	}
+
+	var v *extJSONValue
+
+	switch t {
+	case bsontype.Null, bsontype.Boolean, bsontype.String:
+		if ejp.s != jpsSawValue {
+			return nil, invalidRequestError(t.String())
+		}
+		v = ejp.v
+	case bsontype.Int32, bsontype.Int64, bsontype.Double:
+		// relaxed version allows these to be literal number values
+		if ejp.s == jpsSawValue {
+			v = ejp.v
+			break
+		}
+		fallthrough
+	case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+
+			v = ejp.v
+
+			// read end object
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("} after value", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
+		if ejp.s != jpsSawKey {
+			return nil, invalidRequestError(t.String())
+		}
+		// read colon
+		ejp.advanceState()
+		if err := ensureColon(ejp.s, ejp.k); err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if t == bsontype.Binary && ejp.s == jpsSawValue {
+			// convert legacy $binary format
+			base64 := ejp.v
+
+			ejp.advanceState()
+			if ejp.s != jpsSawComma {
+				return nil, invalidJSONErrorForType(",", bsontype.Binary)
+			}
+
+			ejp.advanceState()
+			key, t, err := ejp.readKey()
+			if err != nil {
+				return nil, err
+			}
+			if key != "$type" {
+				return nil, invalidJSONErrorForType("$type", bsontype.Binary)
+			}
+
+			subType, err := ejp.readValue(t)
+			if err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary)
+			}
+
+			v = &extJSONValue{
+				t: bsontype.EmbeddedDocument,
+				v: &extJSONObject{
+					keys:   []string{"base64", "subType"},
+					values: []*extJSONValue{base64, subType},
+				},
+			}
+			break
+		}
+
+		// read KV pairs
+		if ejp.s != jpsSawBeginObject {
+			return nil, invalidJSONErrorForType("{", t)
+		}
+
+		keys, vals, err := ejp.readObject(2, true)
+		if err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if ejp.s != jpsSawEndObject {
+			return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
+		}
+
+		v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+
+	case bsontype.DateTime:
+		switch ejp.s {
+		case jpsSawValue:
+			v = ejp.v
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			switch ejp.s {
+			case jpsSawBeginObject:
+				keys, vals, err := ejp.readObject(1, true)
+				if err != nil {
+					return nil, err
+				}
+				v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+			case jpsSawValue:
+				if ejp.canonical {
+					return nil, invalidJSONError("{")
+				}
+				v = ejp.v
+			default:
+				if ejp.canonical {
+					return nil, invalidJSONErrorForType("object", t)
+				}
+				return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t)
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("value and then }", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.JavaScript:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+			v = ejp.v
+
+			// read end object or comma and just return
+			ejp.advanceState()
+		case jpsSawEndObject:
+			v = ejp.v
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.CodeWithScope:
+		if ejp.s == jpsSawKey && ejp.k == "$scope" {
+			v = ejp.v // this is the $code string from earlier
+
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read {
+			ejp.advanceState()
+			if ejp.s != jpsSawBeginObject {
+				return nil, invalidJSONError("$scope to be embedded document")
+			}
+		} else {
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.EmbeddedDocument, bsontype.Array:
+		return nil, invalidRequestError(t.String())
+	}
+
+	return v, nil
+}
+
+// readObject is a utility method for reading full objects of known (or expected) size.
+// It is useful for extended JSON types such as binary, datetime, regex, and timestamp.
+func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
+	keys := make([]string, numKeys)
+	vals := make([]*extJSONValue, numKeys)
+
+	if !started {
+		ejp.advanceState()
+		if ejp.s != jpsSawBeginObject {
+			return nil, nil, invalidJSONError("{")
+		}
+	}
+
+	for i := 0; i < numKeys; i++ {
+		key, t, err := ejp.readKey()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		switch ejp.s {
+		case jpsSawKey:
+			v, err := ejp.readValue(t)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			keys[i] = key
+			vals[i] = v
+		case jpsSawValue:
+			keys[i] = key
+			vals[i] = ejp.v
+		default:
+			return nil, nil, invalidJSONError("value")
+		}
+	}
+
+	ejp.advanceState()
+	if ejp.s != jpsSawEndObject {
+		return nil, nil, invalidJSONError("}")
+	}
+
+	return keys, vals, nil
+}
+
+// advanceState reads the next JSON token from the scanner and transitions
+// from the current state based on that token's type
+func (ejp *extJSONParser) advanceState() {
+	if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
+		return
+	}
+
+	jt, err := ejp.js.nextToken()
+
+	if err != nil {
+		ejp.err = err
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	valid := ejp.validateToken(jt.t)
+	if !valid {
+		ejp.err = unexpectedTokenError(jt)
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	switch jt.t {
+	case jttBeginObject:
+		ejp.s = jpsSawBeginObject
+		ejp.pushMode(jpmObjectMode)
+		ejp.depth++
+
+		if ejp.depth > ejp.maxDepth {
+			ejp.err = nestingDepthError(jt.p, ejp.depth)
+			ejp.s = jpsInvalidState
+		}
+	case jttEndObject:
+		ejp.s = jpsSawEndObject
+		ejp.depth--
+
+		if ejp.popMode() != jpmObjectMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttBeginArray:
+		ejp.s = jpsSawBeginArray
+		ejp.pushMode(jpmArrayMode)
+	case jttEndArray:
+		ejp.s = jpsSawEndArray
+
+		if ejp.popMode() != jpmArrayMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttColon:
+		ejp.s = jpsSawColon
+	case jttComma:
+		ejp.s = jpsSawComma
+	case jttEOF:
+		ejp.s = jpsDoneState
+		if len(ejp.m) != 0 {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttString:
+		switch ejp.s {
+		case jpsSawComma:
+			if ejp.peekMode() == jpmArrayMode {
+				ejp.s = jpsSawValue
+				ejp.v = extendJSONToken(jt)
+				return
+			}
+			fallthrough
+		case jpsSawBeginObject:
+			ejp.s = jpsSawKey
+			ejp.k = jt.v.(string)
+			return
+		}
+		fallthrough
+	default:
+		ejp.s = jpsSawValue
+		ejp.v = extendJSONToken(jt)
+	}
+}
+
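+// For orientation, a rough walk-through of the state machine above (an
+// illustrative sketch only; the table below is the authoritative
+// specification): for the input {"a": 1}, advanceState moves the parser
+// through
+//
+//	jpsStartState -> jpsSawBeginObject ('{') -> jpsSawKey ("a") ->
+//	jpsSawColon (':') -> jpsSawValue (1) -> jpsSawEndObject ('}') ->
+//	jpsDoneState (EOF)
+//
+// with each token first checked against jpsValidTransitionTokens via
+// validateToken before the new state is committed.
+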
+var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
+	jpsStartState: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+		jttEOF:         true,
+	},
+	jpsSawBeginObject: {
+		jttEndObject: true,
+		jttString:    true,
+	},
+	jpsSawEndObject: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawBeginArray: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttEndArray:    true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawEndArray: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawColon: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawComma: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawKey: {
+		jttColon: true,
+	},
+	jpsSawValue: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsDoneState:    {},
+	jpsInvalidState: {},
+}
+
+func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
+	switch ejp.s {
+	case jpsSawEndObject:
+		// if we are at depth zero and the next token is a '{',
+		// we can consider it valid only if we are not in array mode.
+		if jtt == jttBeginObject && ejp.depth == 0 {
+			return ejp.peekMode() != jpmArrayMode
+		}
+	case jpsSawComma:
+		switch ejp.peekMode() {
+		// the only valid next token after a comma inside a document is a string (a key)
+		case jpmObjectMode:
+			return jtt == jttString
+		case jpmInvalidMode:
+			return false
+		}
+	}
+
+	_, ok := jpsValidTransitionTokens[ejp.s][jtt]
+	return ok
+}
+
+// ensureExtValueType returns true if the current value has the expected
+// value type for single-key extended JSON types. For example,
+// in {"$numberInt": v}, v must be a string.
+func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
+	switch t {
+	case bsontype.MinKey, bsontype.MaxKey:
+		return ejp.v.t == bsontype.Int32
+	case bsontype.Undefined:
+		return ejp.v.t == bsontype.Boolean
+	case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
+		return ejp.v.t == bsontype.String
+	default:
+		return false
+	}
+}
+
+func (ejp *extJSONParser) pushMode(m jsonParseMode) {
+	ejp.m = append(ejp.m, m)
+}
+
+func (ejp *extJSONParser) popMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	m := ejp.m[l-1]
+	ejp.m = ejp.m[:l-1]
+
+	return m
+}
+
+func (ejp *extJSONParser) peekMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	return ejp.m[l-1]
+}
+
+func extendJSONToken(jt *jsonToken) *extJSONValue {
+	var t bsontype.Type
+
+	switch jt.t {
+	case jttInt32:
+		t = bsontype.Int32
+	case jttInt64:
+		t = bsontype.Int64
+	case jttDouble:
+		t = bsontype.Double
+	case jttString:
+		t = bsontype.String
+	case jttBool:
+		t = bsontype.Boolean
+	case jttNull:
+		t = bsontype.Null
+	default:
+		return nil
+	}
+
+	return &extJSONValue{t: t, v: jt.v}
+}
+
+func ensureColon(s jsonParseState, key string) error {
+	if s != jpsSawColon {
+		return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
+	}
+
+	return nil
+}
+
+func invalidRequestError(s string) error {
+	return fmt.Errorf("invalid request to read %s", s)
+}
+
+func invalidJSONError(expected string) error {
+	return fmt.Errorf("invalid JSON input; expected %s", expected)
+}
+
+func invalidJSONErrorForType(expected string, t bsontype.Type) error {
+	return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
+}
+
+func unexpectedTokenError(jt *jsonToken) error {
+	switch jt.t {
+	case jttInt32, jttInt64, jttDouble:
+		return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
+	case jttString:
+		return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
+	case jttBool:
+		return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
+	case jttNull:
+		return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
+	case jttEOF:
+		return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
+	default:
+		return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
+	}
+}
+
+func nestingDepthError(p, depth int) error {
+	return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p)
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
new file mode 100644
index 0000000..dd560c9
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
@@ -0,0 +1,659 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
+type ExtJSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
+func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
+	return &ExtJSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses r as the underlying ExtJSON source.
+func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
+	vr := bvrp.pool.Get().(*extJSONValueReader)
+	return vr.reset(r, canonical)
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader, nothing
+// is inserted into the pool and ok will be false.
+func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*extJSONValueReader)
+	if !ok {
+		return false
+	}
+
+	bvr, _ = bvr.reset(nil, false)
+	bvrp.pool.Put(bvr)
+	return true
+}
+
+type ejvrState struct {
+	mode  mode
+	vType bsontype.Type
+	depth int
+}
+
+// extJSONValueReader is for reading extended JSON.
+type extJSONValueReader struct {
+	p *extJSONParser
+
+	stack []ejvrState
+	frame int
+}
+
+// NewExtJSONValueReader creates a new ValueReader from a given io.Reader.
+// It will interpret the JSON of r as canonical or relaxed according to the
+// given canonical flag.
+func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
+	return newExtJSONValueReader(r, canonical)
+}
+
+func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	ejvr := new(extJSONValueReader)
+	return ejvr.reset(r, canonical)
+}
+
+func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	p := newExtJSONParser(r, canonical)
+	typ, err := p.peekType()
+
+	if err != nil {
+		return nil, ErrInvalidJSON
+	}
+
+	var m mode
+	switch typ {
+	case bsontype.EmbeddedDocument:
+		m = mTopLevel
+	case bsontype.Array:
+		m = mArray
+	default:
+		m = mValue
+	}
+
+	stack := make([]ejvrState, 1, 5)
+	stack[0] = ejvrState{
+		mode:  m,
+		vType: typ,
+	}
+	return &extJSONValueReader{
+		p:     p,
+		stack: stack,
+	}, nil
+}
+
+func (ejvr *extJSONValueReader) advanceFrame() {
+	if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
+		length := len(ejvr.stack)
+		if length+1 >= cap(ejvr.stack) {
+			// double it
+			buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
+			copy(buf, ejvr.stack)
+			ejvr.stack = buf
+		}
+		ejvr.stack = ejvr.stack[:length+1]
+	}
+	ejvr.frame++
+
+	// Clean the stack
+	ejvr.stack[ejvr.frame].mode = 0
+	ejvr.stack[ejvr.frame].vType = 0
+	ejvr.stack[ejvr.frame].depth = 0
+}
+
+func (ejvr *extJSONValueReader) pushDocument() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mDocument
+	ejvr.stack[ejvr.frame].depth = ejvr.p.depth
+}
+
+func (ejvr *extJSONValueReader) pushCodeWithScope() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mCodeWithScope
+}
+
+func (ejvr *extJSONValueReader) pushArray() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mArray
+}
+
+func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = m
+	ejvr.stack[ejvr.frame].vType = t
+}
+
+func (ejvr *extJSONValueReader) pop() {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		ejvr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
+	}
+}
+
+func (ejvr *extJSONValueReader) skipDocument() error {
+	// read entire document until ErrEOD (using readKey and readValue)
+	_, typ, err := ejvr.p.readKey()
+	for err == nil {
+		_, err = ejvr.p.readValue(typ)
+		if err != nil {
+			break
+		}
+
+		_, typ, err = ejvr.p.readKey()
+	}
+
+	return err
+}
+
+func (ejvr *extJSONValueReader) skipArray() error {
+	// read entire array until ErrEOA (using peekType)
+	_, err := ejvr.p.peekType()
+	for err == nil {
+		_, err = ejvr.p.peekType()
+	}
+
+	return err
+}
+
+func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvr.stack[ejvr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if ejvr.frame != 0 {
+		te.parent = ejvr.stack[ejvr.frame-1].mode
+	}
+	return te
+}
+
+func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
+}
+
+func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != t {
+			return ejvr.typeError(t)
+		}
+	default:
+		modes := []mode{mElement, mValue}
+		if addModes != nil {
+			modes = append(modes, addModes...)
+		}
+		return ejvr.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) Type() bsontype.Type {
+	return ejvr.stack[ejvr.frame].vType
+}
+
+func (ejvr *extJSONValueReader) Skip() error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+	default:
+		return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	defer ejvr.pop()
+
+	t := ejvr.stack[ejvr.frame].vType
+	switch t {
+	case bsontype.Array:
+		// read entire array until ErrEOA
+		err := ejvr.skipArray()
+		if err != ErrEOA {
+			return err
+		}
+	case bsontype.EmbeddedDocument:
+		// read entire doc until ErrEOD
+		err := ejvr.skipDocument()
+		if err != ErrEOD {
+			return err
+		}
+	case bsontype.CodeWithScope:
+		// read the code portion and set up parser in document mode
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+
+		// read until ErrEOD
+		err = ejvr.skipDocument()
+		if err != ErrEOD {
+			return err
+		}
+	default:
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel: // allow reading array from top level
+	case mArray:
+		return ejvr, nil
+	default:
+		if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
+			return nil, err
+		}
+	}
+
+	ejvr.pushArray()
+
+	return ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Binary)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	b, btype, err = v.parseBinary()
+
+	ejvr.pop()
+	return b, btype, err
+}
+
+func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
+	if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Boolean)
+	if err != nil {
+		return false, err
+	}
+
+	if v.t != bsontype.Boolean {
+		return false, fmt.Errorf("expected type bool, but got type %s", v.t)
fmt.Errorf("expected type bool, but got type %s", v.t) + } + + ejvr.pop() + return v.v.(bool), nil +} + +func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel: + return ejvr, nil + case mElement, mValue: + if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { + return nil, ejvr.typeError(bsontype.EmbeddedDocument) + } + + ejvr.pushDocument() + return ejvr, nil + default: + return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) + } +} + +func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { + if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { + return "", nil, err + } + + v, err := ejvr.p.readValue(bsontype.CodeWithScope) + if err != nil { + return "", nil, err + } + + code, err = v.parseJavascript() + + ejvr.pushCodeWithScope() + return code, ejvr, err +} + +func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { + if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { + return "", primitive.NilObjectID, err + } + + v, err := ejvr.p.readValue(bsontype.DBPointer) + if err != nil { + return "", primitive.NilObjectID, err + } + + ns, oid, err = v.parseDBPointer() + + ejvr.pop() + return ns, oid, err +} + +func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.DateTime) + if err != nil { + return 0, err + } + + d, err := v.parseDateTime() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) { + if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { + return primitive.Decimal128{}, err + } + + v, err := ejvr.p.readValue(bsontype.Decimal128) + if err != nil { + return primitive.Decimal128{}, err + } + + d, err := v.parseDecimal128() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { + if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Double) + if err != nil { + return 0, err + } + + d, err := v.parseDouble() + + ejvr.pop() + return d, err +} + +func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { + if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int32) + if err != nil { + return 0, err + } + + i, err := v.parseInt32() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { + if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { + return 0, err + } + + v, err := ejvr.p.readValue(bsontype.Int64) + if err != nil { + return 0, err + } + + i, err := v.parseInt64() + + ejvr.pop() + return i, err +} + +func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { + if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.JavaScript) + if err != nil { + return "", err + } + + code, err = v.parseJavascript() + + ejvr.pop() + return code, err +} + +func (ejvr *extJSONValueReader) ReadMaxKey() error { + if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, 
"ReadMaxKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MaxKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("max") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadMinKey() error { + if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.MinKey) + if err != nil { + return err + } + + err = v.parseMinMaxKey("min") + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadNull() error { + if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Null) + if err != nil { + return err + } + + if v.t != bsontype.Null { + return fmt.Errorf("expected type null but got type %s", v.t) + } + + ejvr.pop() + return nil +} + +func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { + if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { + return primitive.ObjectID{}, err + } + + v, err := ejvr.p.readValue(bsontype.ObjectID) + if err != nil { + return primitive.ObjectID{}, err + } + + oid, err := v.parseObjectID() + + ejvr.pop() + return oid, err +} + +func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { + if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { + return "", "", err + } + + v, err := ejvr.p.readValue(bsontype.Regex) + if err != nil { + return "", "", err + } + + pattern, options, err = v.parseRegex() + + ejvr.pop() + return pattern, options, err +} + +func (ejvr *extJSONValueReader) ReadString() (string, error) { + if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.String) + if err != nil { + return "", err + } + + if v.t != bsontype.String { + return "", fmt.Errorf("expected type string but got type %s", v.t) + } + + ejvr.pop() + return v.v.(string), nil +} + +func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { + if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.Symbol) + if err != nil { + return "", err + } + + symbol, err = v.parseSymbol() + + ejvr.pop() + return symbol, err +} + +func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { + if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { + return 0, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Timestamp) + if err != nil { + return 0, 0, err + } + + t, i, err = v.parseTimestamp() + + ejvr.pop() + return t, i, err +} + +func (ejvr *extJSONValueReader) ReadUndefined() error { + if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Undefined) + if err != nil { + return err + } + + err = v.parseUndefined() + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel, mDocument, mCodeWithScope: + default: + return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) + } + + name, t, err := ejvr.p.readKey() + + if err != nil { + if err == ErrEOD { + if ejvr.stack[ejvr.frame].mode == mCodeWithScope { + _, err := ejvr.p.peekType() + if err != nil { + return "", nil, err + } + } + + 
+			ejvr.pop()
+		}
+
+		return "", nil, err
+	}
+
+	ejvr.push(mElement, t)
+	return name, ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mArray:
+	default:
+		return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := ejvr.p.peekType()
+	if err != nil {
+		if err == ErrEOA {
+			ejvr.pop()
+		}
+
+		return nil, err
+	}
+
+	ejvr.push(mValue, t)
+	return ejvr, nil
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
new file mode 100644
index 0000000..ba39c96
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsonrw
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML